Compare commits

1 commit

mr-tz
481ae685e1 move sigs to capa directory (2024-01-18 12:31:55 +01:00)

280 changed files with 3484 additions and 23216 deletions


@@ -57,7 +57,7 @@ When we make a significant decision in how we maintain the project and what we c
we will document it in the [capa issues tracker](https://github.com/mandiant/capa/issues).
This is the best place review our discussions about what/how/why we do things in the project.
If you have a question, check to see if it is documented there.
If it is *not* documented there, or you can't find an answer, please open an issue.
If it is *not* documented there, or you can't find an answer, please open a issue.
We'll link to existing issues when appropriate to keep discussions in one place.
## How Can I Contribute?


@@ -4,6 +4,3 @@ updates:
directory: "/"
schedule:
interval: "weekly"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-patch"]

.github/flake8.ini (vendored): 4 changed lines

@@ -10,8 +10,6 @@ extend-ignore =
F811,
# E501 line too long (prefer black)
E501,
# E701 multiple statements on one line (colon) (prefer black, see https://github.com/psf/black/issues/4173)
E701,
# B010 Do not call setattr with a constant attribute value
B010,
# G200 Logging statement uses exception in arguments
@@ -40,4 +38,4 @@ per-file-ignores =
copyright-check = True
copyright-min-file-size = 1
copyright-regexp = Copyright \(C\) \d{4} Mandiant, Inc. All Rights Reserved.
copyright-regexp = Copyright \(C\) 2023 Mandiant, Inc. All Rights Reserved.
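The two `copyright-regexp` values are the two sides of this hunk: one accepts any four-digit year, the other pins 2023. A minimal sketch of the behavioral difference, assuming the value is an ordinary Python regular expression (the names below are illustrative only):

```python
import re

generic = re.compile(r"Copyright \(C\) \d{4} Mandiant, Inc. All Rights Reserved.")
pinned = re.compile(r"Copyright \(C\) 2023 Mandiant, Inc. All Rights Reserved.")

header = "# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved."
assert generic.search(header) is not None  # \d{4} matches any year
assert pinned.search(header) is None       # the pinned pattern rejects 2020
```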


@@ -1,5 +1,11 @@
[mypy]
[mypy-halo.*]
ignore_missing_imports = True
[mypy-tqdm.*]
ignore_missing_imports = True
[mypy-ruamel.*]
ignore_missing_imports = True


@@ -24,7 +24,7 @@ excludedimports = [
"pyqtwebengine",
# the above are imported by these viv modules.
# so really, we'd want to exclude these submodules of viv.
# but i don't think this works.
# but i dont think this works.
"vqt",
"vdb.qt",
"envi.qt",


@@ -1,18 +1,10 @@
# -*- mode: python -*-
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
import sys
import os.path
import subprocess
import capa.rules.cache
import wcwidth
from pathlib import Path
# SPECPATH is a global variable which points to .spec file path
capa_dir = Path(SPECPATH).parent.parent
rules_dir = capa_dir / 'rules'
cache_dir = capa_dir / 'cache'
if not capa.rules.cache.generate_rule_cache(rules_dir, cache_dir):
sys.exit(-1)
a = Analysis(
# when invoking pyinstaller from the project root,
@@ -26,8 +18,15 @@ a = Analysis(
# this gets invoked from the directory of the spec file,
# i.e. ./.github/pyinstaller
("../../rules", "rules"),
("../../sigs", "sigs"),
("../../capa/sigs", "sigs"),
("../../cache", "cache"),
# capa.render.default uses tabulate that depends on wcwidth.
# it seems wcwidth uses a json file `version.json`
# and this doesn't get picked up by pyinstaller automatically.
# so we manually embed the wcwidth resources here.
#
# ref: https://stackoverflow.com/a/62278462/87207
(os.path.dirname(wcwidth.__file__), "wcwidth"),
],
# when invoking pyinstaller from the project root,
# this gets run from the project root.
@@ -40,6 +39,11 @@ a = Analysis(
"tkinter",
"_tkinter",
"Tkinter",
# tqdm provides renderers for ipython,
# however, this drags in a lot of dependencies.
# since we don't spawn a notebook, we can safely remove these.
"IPython",
"ipywidgets",
# these are pulled in by networkx
# but we don't need to compute the strongly connected components.
"numpy",
@@ -57,10 +61,7 @@ a = Analysis(
"qt5",
"pyqtwebengine",
"pyasn1",
# don't pull in Binary Ninja/IDA bindings that should
# only be installed locally.
"binaryninja",
"ida",
],
)
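Each entry in the `datas` list above follows PyInstaller's `(source, destination)` tuple convention: a path on disk relative to the spec file, and a folder name inside the frozen bundle. A minimal sketch of the idea, assuming the spec is evaluated from `.github/pyinstaller/` as the comments state and that `wcwidth` is installed:

```python
import os.path

import wcwidth

# (path on disk relative to the spec file, folder inside the frozen bundle)
datas = [
    ("../../rules", "rules"),            # the rule set
    ("../../capa/sigs", "sigs"),         # library identification signatures
    ("../../cache", "cache"),            # pre-compiled rule cache
    # wcwidth ships a data file (version.json) that PyInstaller's import
    # analysis does not discover, so the package directory is embedded whole.
    (os.path.dirname(wcwidth.__file__), "wcwidth"),
]
```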

.github/ruff.toml (vendored): 8 changed lines

@@ -1,16 +1,16 @@
# Enable the pycodestyle (`E`) and Pyflakes (`F`) rules by default.
# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
# McCabe complexity (`C901`) by default.
lint.select = ["E", "F"]
select = ["E", "F"]
# Allow autofix for all enabled rules (when `--fix`) is provided.
lint.fixable = ["ALL"]
lint.unfixable = []
fixable = ["ALL"]
unfixable = []
# E402 module level import not at top of file
# E722 do not use bare 'except'
# E501 line too long
lint.ignore = ["E402", "E722", "E501"]
ignore = ["E402", "E722", "E501"]
line-length = 120


@@ -3,10 +3,6 @@ name: build
on:
pull_request:
branches: [ master ]
paths-ignore:
- 'web/**'
- 'doc/**'
- '**.md'
release:
types: [edited, published]
@@ -30,24 +26,24 @@ jobs:
python_version: 3.8
- os: ubuntu-20.04
artifact_name: capa
asset_name: linux-py312
python_version: 3.12
asset_name: linux-py311
python_version: 3.11
- os: windows-2019
artifact_name: capa.exe
asset_name: windows
python_version: 3.8
- os: macos-12
- os: macos-11
# use older macOS for assumed better portability
artifact_name: capa
asset_name: macos
python_version: 3.8
steps:
- name: Checkout capa
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
submodules: true
- name: Set up Python ${{ matrix.python_version }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ matrix.python_version }}
- if: matrix.os == 'ubuntu-20.04'
@@ -55,22 +51,22 @@ jobs:
- name: Upgrade pip, setuptools
run: python -m pip install --upgrade pip setuptools
- name: Install capa with build requirements
run: |
pip install -r requirements.txt
pip install -e .[build]
run: pip install -e .[build]
- name: Cache the rule set
run: python ./scripts/cache-ruleset.py ./rules/ ./cache/
- name: Build standalone executable
run: pyinstaller --log-level DEBUG .github/pyinstaller/pyinstaller.spec
- name: Does it run (PE)?
run: dist/capa -d "tests/data/Practical Malware Analysis Lab 01-01.dll_"
run: dist/capa "tests/data/Practical Malware Analysis Lab 01-01.dll_"
- name: Does it run (Shellcode)?
run: dist/capa -d "tests/data/499c2a85f6e8142c3f48d4251c9c7cd6.raw32"
run: dist/capa "tests/data/499c2a85f6e8142c3f48d4251c9c7cd6.raw32"
- name: Does it run (ELF)?
run: dist/capa -d "tests/data/7351f8a40c5450557b24622417fc478d.elf_"
run: dist/capa "tests/data/7351f8a40c5450557b24622417fc478d.elf_"
- name: Does it run (CAPE)?
run: |
7z e "tests/data/dynamic/cape/v2.2/d46900384c78863420fb3e297d0a2f743cd2b6b3f7f82bf64059a168e07aceb7.json.gz"
dist/capa -d "d46900384c78863420fb3e297d0a2f743cd2b6b3f7f82bf64059a168e07aceb7.json"
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
dist/capa "d46900384c78863420fb3e297d0a2f743cd2b6b3f7f82bf64059a168e07aceb7.json"
- uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: ${{ matrix.asset_name }}
path: dist/${{ matrix.artifact_name }}
@@ -88,13 +84,13 @@ jobs:
asset_name: linux
- os: ubuntu-22.04
artifact_name: capa
asset_name: linux-py312
asset_name: linux-py311
- os: windows-2022
artifact_name: capa.exe
asset_name: windows
steps:
- name: Download ${{ matrix.asset_name }}
uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: ${{ matrix.asset_name }}
- name: Set executable flag
@@ -114,7 +110,7 @@ jobs:
include:
- asset_name: linux
artifact_name: capa
- asset_name: linux-py312
- asset_name: linux-py311
artifact_name: capa
- asset_name: windows
artifact_name: capa.exe
@@ -122,7 +118,7 @@ jobs:
artifact_name: capa
steps:
- name: Download ${{ matrix.asset_name }}
uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: ${{ matrix.asset_name }}
- name: Set executable flag


@@ -7,8 +7,7 @@ on:
pull_request_target:
types: [opened, edited, synchronize]
permissions:
pull-requests: write
permissions: read-all
jobs:
check_changelog:
@@ -20,7 +19,7 @@ jobs:
steps:
- name: Get changed files
id: files
uses: Ana06/get-changed-files@25f79e676e7ea1868813e21465014798211fad8c # v2.3.0
uses: Ana06/get-changed-files@e0c398b7065a8d84700c471b6afc4116d1ba4e96 # v2.2.0
- name: check changelog updated
id: changelog_updated
env:
@@ -30,14 +29,14 @@ jobs:
echo $FILES | grep -qF 'CHANGELOG.md' || echo $PR_BODY | grep -qiF "$NO_CHANGELOG"
- name: Reject pull request if no CHANGELOG update
if: ${{ always() && steps.changelog_updated.outcome == 'failure' }}
uses: Ana06/automatic-pull-request-review@76aaf9b15b116a54e1da7a28a46f91fe089600bf # v0.2.0
uses: Ana06/automatic-pull-request-review@0cf4e8a17ba79344ed3fdd7fed6dd0311d08a9d4 # v0.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
event: REQUEST_CHANGES
body: "Please add bug fixes, new features, breaking changes and anything else you think is worthwhile mentioning to the `master (unreleased)` section of CHANGELOG.md. If no CHANGELOG update is needed add the following to the PR description: `${{ env.NO_CHANGELOG }}`"
allow_duplicate: false
- name: Dismiss previous review if CHANGELOG update
uses: Ana06/automatic-pull-request-review@76aaf9b15b116a54e1da7a28a46f91fe089600bf # v0.2.0
uses: Ana06/automatic-pull-request-review@0cf4e8a17ba79344ed3fdd7fed6dd0311d08a9d4 # v0.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
event: DISMISS
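The grep one-liner in the "check changelog updated" step passes when either condition holds: the changed-file list mentions CHANGELOG.md, or the PR body carries the opt-out marker. A sketch of the same logic in Python, with hypothetical argument names:

```python
def changelog_check_passes(changed_files: str, pr_body: str, marker: str) -> bool:
    # mirrors: echo $FILES | grep -qF 'CHANGELOG.md' || echo $PR_BODY | grep -qiF "$NO_CHANGELOG"
    return "CHANGELOG.md" in changed_files or marker.lower() in pr_body.lower()
```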


@@ -17,21 +17,20 @@ jobs:
permissions:
id-token: write
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Set up Python
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: '3.8'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -e .[build]
- name: build package
run: |
python -m build
- name: upload package artifacts
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
path: dist/*
- name: publish package


@@ -32,12 +32,12 @@ jobs:
steps:
- name: "Checkout code"
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
uses: ossf/scorecard-action@99c53751e09b9529366343771cc321ec74e9bd3d # v2.0.6
with:
results_file: results.sarif
results_format: sarif
@@ -59,7 +59,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0
with:
name: SARIF file
path: results.sarif
@@ -67,6 +67,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@8a470fddafa5cbb6266ee11b37ef4d8aae19c571 # v3.24.6
uses: github/codeql-action/upload-sarif@807578363a7869ca324a79039e6db9c843e0e100 # v2.1.27
with:
sarif_file: results.sarif


@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout capa-rules
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
repository: mandiant/capa-rules
token: ${{ secrets.CAPA_TOKEN }}
@@ -25,7 +25,7 @@ jobs:
git tag $name -m "https://github.com/mandiant/capa/releases/$name"
# TODO update branch name-major=${name%%.*}
- name: Push tag to capa-rules
uses: ad-m/github-push-action@d91a481090679876dfc4178fef17f286781251df # v0.8.0
uses: ad-m/github-push-action@0fafdd62b84042d49ec0cb92d9cac7f7ce4ec79e # master
with:
repository: mandiant/capa-rules
github_token: ${{ secrets.CAPA_TOKEN }}


@@ -1,22 +1,10 @@
name: CI
# tests.yml workflow will run for all changes except:
# any file or directory under web/ or doc/
# any Markdown (.md) file anywhere in the repository
on:
push:
branches: [ master ]
paths-ignore:
- 'web/**'
- 'doc/**'
- '**.md'
pull_request:
branches: [ master ]
paths-ignore:
- 'web/**'
- 'doc/**'
- '**.md'
permissions: read-all
@@ -29,7 +17,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout capa
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
# The sync GH action in capa-rules relies on a single '- *$' in the CHANGELOG file
- name: Ensure CHANGELOG has '- *$'
run: |
@@ -40,16 +28,14 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout capa
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
# use latest available python to take advantage of best performance
- name: Set up Python 3.11
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: "3.11"
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install -e .[dev,scripts]
run: pip install -e .[dev]
- name: Lint with ruff
run: pre-commit run ruff
- name: Lint with isort
@@ -59,25 +45,21 @@ jobs:
- name: Lint with flake8
run: pre-commit run flake8 --hook-stage manual
- name: Check types with mypy
run: pre-commit run mypy --hook-stage manual
- name: Check imports against dependencies
run: pre-commit run deptry --hook-stage manual
run: pre-commit run mypy --hook-stage manual
rule_linter:
runs-on: ubuntu-20.04
steps:
- name: Checkout capa with submodules
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
submodules: recursive
- name: Set up Python 3.11
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: "3.11"
- name: Install capa
run: |
pip install -r requirements.txt
pip install -e .[dev,scripts]
run: pip install -e .[dev]
- name: Run rule linter
run: python scripts/lint.py rules/
@@ -88,7 +70,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-20.04, windows-2019, macos-12]
os: [ubuntu-20.04, windows-2019, macos-11]
# across all operating systems
python-version: ["3.8", "3.11"]
include:
@@ -101,20 +83,18 @@ jobs:
python-version: "3.10"
steps:
- name: Checkout capa with submodules
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
submodules: recursive
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ matrix.python-version }}
- name: Install pyyaml
if: matrix.os == 'ubuntu-20.04'
run: sudo apt-get install -y libyaml-dev
- name: Install capa
run: |
pip install -r requirements.txt
pip install -e .[dev,scripts]
run: pip install -e .[dev]
- name: Run tests (fast)
# this set of tests runs about 80% of the cases in 20% of the time,
# and should catch most errors quickly.
@@ -126,22 +106,22 @@ jobs:
name: Binary Ninja tests for ${{ matrix.python-version }}
env:
BN_SERIAL: ${{ secrets.BN_SERIAL }}
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
needs: [tests]
strategy:
fail-fast: false
matrix:
python-version: ["3.9", "3.11"]
python-version: ["3.8", "3.11"]
steps:
- name: Checkout capa with submodules
# do only run if BN_SERIAL is available, have to do this in every step, see https://github.com/orgs/community/discussions/26726#discussioncomment-3253118
if: ${{ env.BN_SERIAL != 0 }}
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
submodules: recursive
- name: Set up Python ${{ matrix.python-version }}
if: ${{ env.BN_SERIAL != 0 }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ matrix.python-version }}
- name: Install pyyaml
@@ -149,9 +129,7 @@ jobs:
run: sudo apt-get install -y libyaml-dev
- name: Install capa
if: ${{ env.BN_SERIAL != 0 }}
run: |
pip install -r requirements.txt
pip install -e .[dev,scripts]
run: pip install -e .[dev]
- name: install Binary Ninja
if: ${{ env.BN_SERIAL != 0 }}
run: |
@@ -175,23 +153,31 @@ jobs:
matrix:
python-version: ["3.8", "3.11"]
java-version: ["17"]
ghidra-version: ["11.0.1"]
public-version: ["PUBLIC_20240130"] # for ghidra releases
ghidrathon-version: ["4.0.0"]
gradle-version: ["7.3"]
ghidra-version: ["10.3"]
public-version: ["PUBLIC_20230510"] # for ghidra releases
jep-version: ["4.1.1"]
ghidrathon-version: ["3.0.0"]
steps:
- name: Checkout capa with submodules
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ matrix.python-version }}
- name: Set up Java ${{ matrix.java-version }}
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0
uses: actions/setup-java@5ffc13f4174014e2d4d4572b3d74c3fa61aeb2c2 # v3
with:
distribution: 'temurin'
java-version: ${{ matrix.java-version }}
- name: Set up Gradle ${{ matrix.gradle-version }}
uses: gradle/gradle-build-action@40b6781dcdec2762ad36556682ac74e31030cfe2 # v2.5.1
with:
gradle-version: ${{ matrix.gradle-version }}
- name: Install Jep ${{ matrix.jep-version }}
run : pip install jep==${{ matrix.jep-version }}
- name: Install Ghidra ${{ matrix.ghidra-version }}
run: |
mkdir ./.github/ghidra
@@ -200,17 +186,14 @@ jobs:
- name: Install Ghidrathon
run : |
mkdir ./.github/ghidrathon
wget "https://github.com/mandiant/Ghidrathon/releases/download/v${{ matrix.ghidrathon-version }}/Ghidrathon-v${{ matrix.ghidrathon-version}}.zip" -O ./.github/ghidrathon/ghidrathon-v${{ matrix.ghidrathon-version }}.zip
unzip .github/ghidrathon/ghidrathon-v${{ matrix.ghidrathon-version }}.zip -d .github/ghidrathon/
python -m pip install -r .github/ghidrathon/requirements.txt
python .github/ghidrathon/ghidrathon_configure.py $(pwd)/.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC
unzip .github/ghidrathon/Ghidrathon-v${{ matrix.ghidrathon-version }}.zip -d .github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC/Ghidra/Extensions
curl -o ./.github/ghidrathon/ghidrathon-${{ matrix.ghidrathon-version }}.zip "https://codeload.github.com/mandiant/Ghidrathon/zip/refs/tags/v${{ matrix.ghidrathon-version }}"
unzip .github/ghidrathon/ghidrathon-${{ matrix.ghidrathon-version }}.zip -d .github/ghidrathon/
gradle -p ./.github/ghidrathon/Ghidrathon-${{ matrix.ghidrathon-version }}/ -PGHIDRA_INSTALL_DIR=$(pwd)/.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC
unzip .github/ghidrathon/Ghidrathon-${{ matrix.ghidrathon-version }}/dist/*.zip -d .github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC/Ghidra/Extensions
- name: Install pyyaml
run: sudo apt-get install -y libyaml-dev
- name: Install capa
run: |
pip install -r requirements.txt
pip install -e .[dev,scripts]
run: pip install -e .[dev]
- name: Run tests
run: |
mkdir ./.github/ghidra/project
@@ -218,4 +201,4 @@ jobs:
cat ../output.log
exit_code=$(cat ../output.log | grep exit | awk '{print $NF}')
exit $exit_code


@@ -1,134 +0,0 @@
name: deploy web to GitHub Pages
on:
push:
branches: [ master ]
paths:
- 'web/**'
# Allows to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets the GITHUB_TOKEN permissions to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write
# Allow one concurrent deployment
concurrency:
group: 'pages'
cancel-in-progress: true
jobs:
build-landing-page:
name: Build landing page
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/upload-artifact@v4
with:
name: landing-page
path: './web/public'
build-explorer:
name: Build capa Explorer Web
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: 'recursive'
fetch-depth: 1
show-progress: true
- name: Set up Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
with:
node-version: 20
cache: 'npm'
cache-dependency-path: './web/explorer/package-lock.json'
- name: Install dependencies
run: npm ci
working-directory: ./web/explorer
- name: Generate release bundle
run: npm run build:bundle
working-directory: ./web/explorer
- name: Zip release bundle
run: zip -r public/capa-explorer-web.zip capa-explorer-web
working-directory: ./web/explorer
- name: Build
run: npm run build
working-directory: ./web/explorer
- uses: actions/upload-artifact@v4
with:
name: explorer
path: './web/explorer/dist'
build-rules:
name: Build rules site
runs-on: ubuntu-latest
steps:
- name: Check out the repository
uses: actions/checkout@v4
with:
submodules: 'recursive'
# full depth so that capa-rules has a full history
# and we can construct a timeline of rule updates.
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: '3.12'
- uses: extractions/setup-just@v2
- name: Install pagefind
uses: supplypike/setup-bin@v4
with:
uri: "https://github.com/CloudCannon/pagefind/releases/download/v1.1.0/pagefind-v1.1.0-x86_64-unknown-linux-musl.tar.gz"
name: "pagefind"
version: "1.1.0"
- name: Install dependencies
working-directory: ./web/rules
run: pip install -r requirements.txt
- name: Build the website
working-directory: ./web/rules
run: just build
- name: Index the website
working-directory: ./web/rules
run: pagefind --site "public"
# upload the build website to artifacts
# so that we can download and inspect, if desired.
- uses: actions/upload-artifact@v4
with:
name: rules
path: './web/rules/public'
deploy:
name: Deploy site to GitHub Pages
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: [build-landing-page, build-explorer, build-rules]
steps:
- uses: actions/download-artifact@v4
with:
name: landing-page
path: './public/'
- uses: actions/download-artifact@v4
with:
name: explorer
path: './public/explorer'
- uses: actions/download-artifact@v4
with:
name: rules
path: './public/rules'
- name: Setup Pages
uses: actions/configure-pages@v4
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
path: './public'
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4


@@ -1,42 +0,0 @@
name: Capa Explorer Web tests
on:
pull_request:
branches: [ master ]
paths:
- 'web/explorer/**'
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: 'recursive'
fetch-depth: 1
show-progress: true
- name: Set up Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
with:
node-version: 20
cache: 'npm'
cache-dependency-path: './web/explorer/package-lock.json'
- name: Install dependencies
run: npm ci
working-directory: ./web/explorer
- name: Lint
run: npm run lint
working-directory: ./web/explorer
- name: Format
run: npm run format:check
working-directory: ./web/explorer
- name: Run unit tests
run: npm run test
working-directory: ./web/explorer

.gitignore (vendored): 2 changed lines

@@ -126,5 +126,3 @@ Pipfile.lock
.github/binja/binaryninja
.github/binja/download_headless.py
.github/binja/BinaryNinja-headless.zip
justfile
data/

.gitmodules (vendored): 6 changed lines

@@ -1,6 +1,8 @@
[submodule "rules"]
path = rules
url = ../../mandiant/capa-rules.git
url = ../capa-rules.git
branch = dynamic-syntax
[submodule "tests/data"]
path = tests/data
url = ../../mandiant/capa-testfiles.git
url = ../capa-testfiles.git
branch = dynamic-feature-extractor


@@ -1,25 +0,0 @@
@isort:
pre-commit run isort --show-diff-on-failure --all-files
@black:
pre-commit run black --show-diff-on-failure --all-files
@ruff:
pre-commit run ruff --all-files
@flake8:
pre-commit run flake8 --hook-stage manual --all-files
@mypy:
pre-commit run mypy --hook-stage manual --all-files
@deptry:
pre-commit run deptry --hook-stage manual --all-files
@lint:
-just isort
-just black
-just ruff
-just flake8
-just mypy
-just deptry


@@ -38,7 +38,6 @@ repos:
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
@@ -56,7 +55,6 @@ repos:
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
@@ -74,7 +72,6 @@ repos:
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
@@ -89,11 +86,10 @@ repos:
- "--config"
- ".github/flake8.ini"
- "--extend-exclude"
- "capa/render/proto/capa_pb2.py,capa/features/extractors/binexport2/binexport2_pb2.py"
- "capa/render/proto/capa_pb2.py"
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
@@ -111,17 +107,6 @@ repos:
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
- repo: local
hooks:
- id: deptry
name: deptry
stages: [push, manual]
language: system
entry: deptry .
always_run: true
pass_filenames: false
@@ -142,4 +127,3 @@ repos:
- "--ignore=tests/test_scripts.py"
always_run: true
pass_filenames: false

File diff suppressed because it is too large.


@@ -1,8 +0,0 @@
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
- name: "The FLARE Team"
title: "capa, a tool to identify capabilities in programs and sandbox traces."
date-released: 2020-07-16
url: "https://github.com/mandiant/capa"


@@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (C) 2020 Mandiant, Inc.
Copyright (C) 2023 Mandiant, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -1,40 +1,22 @@
<br />
<div align="center">
<a href="https://mandiant.github.io/capa/" target="_blank">
<img src="https://github.com/mandiant/capa/blob/master/.github/logo.png">
</a>
<p align="center">
<a href="https://mandiant.github.io/capa/" target="_blank">Website</a>
|
<a href="https://github.com/mandiant/capa/releases/latest" target="_blank">Download</a>
|
<a href="https://mandiant.github.io/capa/explorer/" target="_blank">Web Interface</a>
</p>
<div align="center">
![capa](https://github.com/mandiant/capa/blob/master/.github/logo.png)
[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/flare-capa)](https://pypi.org/project/flare-capa)
[![Last release](https://img.shields.io/github/v/release/mandiant/capa)](https://github.com/mandiant/capa/releases)
[![Number of rules](https://gist.githubusercontent.com/capa-bot/6d7960e911f48b3b74916df8988cf0f3/raw/rules_badge.svg)](https://github.com/mandiant/capa-rules)
[![Number of rules](https://img.shields.io/badge/rules-864-blue.svg)](https://github.com/mandiant/capa-rules)
[![CI status](https://github.com/mandiant/capa/workflows/CI/badge.svg)](https://github.com/mandiant/capa/actions?query=workflow%3ACI+event%3Apush+branch%3Amaster)
[![Downloads](https://img.shields.io/github/downloads/mandiant/capa/total)](https://github.com/mandiant/capa/releases)
[![License](https://img.shields.io/badge/license-Apache--2.0-green.svg)](LICENSE.txt)
</div>
</div>
---
capa detects capabilities in executable files.
You run it against a PE, ELF, .NET module, shellcode file, or a sandbox report and it tells you what it thinks the program can do.
For example, it might suggest that the file is a backdoor, is capable of installing services, or relies on HTTP to communicate.
To interactively inspect capa results in your browser use the [capa Explorer Web](https://mandiant.github.io/capa/explorer/).
Check out:
- the overview in our first [capa blog post](https://www.mandiant.com/resources/capa-automatically-identify-malware-capabilities)
- the major version 2.0 updates described in our [second blog post](https://www.mandiant.com/resources/capa-2-better-stronger-faster)
- the major version 3.0 (ELF support) described in the [third blog post](https://www.mandiant.com/resources/elfant-in-the-room-capa-v3)
- the major version 4.0 (.NET support) described in the [fourth blog post](https://www.mandiant.com/resources/blog/capa-v4-casting-wider-net)
If you want to inspect or write capa rules, head on over to the [capa-rules repository](https://github.com/mandiant/capa-rules). Otherwise, keep reading.
Below you find a list of [our capa blog posts with more details.](#blog-posts)
# example capa output
```
$ capa.exe suspicious.exe
@@ -89,23 +71,16 @@ Download stable releases of the standalone capa binaries [here](https://github.c
To use capa as a library or integrate with another tool, see [doc/installation.md](https://github.com/mandiant/capa/blob/master/doc/installation.md) for further setup instructions.
# capa Explorer Web
The [capa Explorer Web](https://mandiant.github.io/capa/explorer/) enables you to interactively explore capa results in your web browser. Besides the online version you can download a standalone HTML file for local offline usage.
![capa Explorer Web screenshot](https://github.com/mandiant/capa/blob/master/doc/img/capa_web_explorer.png)
More details on the web UI is available in the [capa Explorer Web README](https://github.com/mandiant/capa/blob/master/web/explorer/README.md).
For more information about how to use capa, see [doc/usage.md](https://github.com/mandiant/capa/blob/master/doc/usage.md).
# example
In the above sample output, we run capa against an unknown binary (`suspicious.exe`),
and the tool reports that the program can send HTTP requests, decode data via XOR and Base64,
In the above sample output, we ran capa against an unknown binary (`suspicious.exe`),
and the tool reported that the program can send HTTP requests, decode data via XOR and Base64,
install services, and spawn new processes.
Taken together, this makes us think that `suspicious.exe` could be a persistent backdoor.
Therefore, our next analysis step might be to run `suspicious.exe` in a sandbox and try to recover the command and control server.
## detailed results
By passing the `-vv` flag (for very verbose), capa reports exactly where it found evidence of these capabilities.
This is useful for at least two reasons:
@@ -150,15 +125,10 @@ function @ 0x4011C0
...
```
capa also supports dynamic capabilities detection for multiple sandboxes including:
* [CAPE](https://github.com/kevoreilly/CAPEv2) (supported report formats: `.json`, `.json_`, `.json.gz`)
* [DRAKVUF](https://github.com/CERT-Polska/drakvuf-sandbox/) (supported report formats: `.log`, `.log.gz`)
* [VMRay](https://www.vmray.com/) (supported report formats: analysis archive `.zip`)
Additionally, capa also supports analyzing [CAPE](https://github.com/kevoreilly/CAPEv2) sandbox reports for dynamic capabilty extraction.
In order to use this, you first submit your sample to CAPE for analysis, and then run capa against the generated report (JSON).
To use this feature, submit your file to a supported sandbox and then download and run capa against the generated report file. This feature enables capa to match capabilities against dynamic and static features that the sandbox captured during execution.
Here's an example of running capa against a packed file, and then running capa against the CAPE report generated for the same packed file:
Here's an example of running capa against a packed binary, and then running capa against the CAPE report of that binary:
```yaml
$ capa 05be49819139a3fdcdbddbdefd298398779521f3d68daa25275cc77508e42310.exe
@@ -245,7 +215,6 @@ $ capa 05be49819139a3fdcdbddbdefd298398779521f3d68daa25275cc77508e42310.json
┕━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┙
```
# capa rules
capa uses a collection of rules to identify capabilities within a program.
These rules are easy to write, even for those new to reverse engineering.
By authoring rules, you can extend the capabilities that capa recognizes.
@@ -282,27 +251,15 @@ rule:
- property/read: System.Net.Sockets.TcpClient::Client
```
The [github.com/mandiant/capa-rules](https://github.com/mandiant/capa-rules) repository contains hundreds of standard rules that are distributed with capa.
The [github.com/mandiant/capa-rules](https://github.com/mandiant/capa-rules) repository contains hundreds of standard library rules that are distributed with capa.
Please learn to write rules and contribute new entries as you find interesting techniques in malware.
# IDA Pro plugin: capa explorer
If you use IDA Pro, then you can use the [capa explorer](https://github.com/mandiant/capa/tree/master/capa/ida/plugin) plugin.
capa explorer helps you identify interesting areas of a program and build new capa rules using features extracted directly from your IDA Pro database.
It also uses your local changes to the .idb to extract better features, such as when you rename a global variable that contains a dynamically resolved API address.
![capa + IDA Pro integration](https://github.com/mandiant/capa/blob/master/doc/img/explorer_expanded.png)
# Ghidra integration
If you use Ghidra, then you can use the [capa + Ghidra integration](/capa/ghidra/) to run capa's analysis directly on your Ghidra database and render the results in Ghidra's user interface.
<img src="https://github.com/mandiant/capa/assets/66766340/eeae33f4-99d4-42dc-a5e8-4c1b8c661492" width=300>
# blog posts
- [Dynamic capa: Exploring Executable Run-Time Behavior with the CAPE Sandbox](https://www.mandiant.com/resources/blog/dynamic-capa-executable-behavior-cape-sandbox)
- [capa v4: casting a wider .NET](https://www.mandiant.com/resources/blog/capa-v4-casting-wider-net) (.NET support)
- [ELFant in the Room capa v3](https://www.mandiant.com/resources/elfant-in-the-room-capa-v3) (ELF support)
- [capa 2.0: Better, Stronger, Faster](https://www.mandiant.com/resources/capa-2-better-stronger-faster)
- [capa: Automatically Identify Malware Capabilities](https://www.mandiant.com/resources/capa-automatically-identify-malware-capabilities)
If you use Ghidra, you can use the Python 3 [Ghidra feature extractor](/capa/ghidra/). This integration enables capa to extract features directly from your Ghidra database, which can help you identify capabilities in programs that you analyze using Ghidra.
# further information
## capa


@@ -9,13 +9,16 @@
import logging
import itertools
import collections
from typing import Any, List, Tuple
from typing import Any, Tuple
import tqdm
import capa.perf
import capa.features.freeze as frz
import capa.render.result_document as rdoc
from capa.rules import Scope, RuleSet
from capa.engine import FeatureSet, MatchResults
from capa.helpers import redirecting_print_to_tqdm
from capa.capabilities.common import find_file_capabilities
from capa.features.extractors.base_extractor import CallHandle, ThreadHandle, ProcessHandle, DynamicFeatureExtractor
@@ -62,7 +65,7 @@ def find_thread_capabilities(
features: FeatureSet = collections.defaultdict(set)
# matches found at the call scope.
# might be found at different calls, that's ok.
# might be found at different calls, thats ok.
call_matches: MatchResults = collections.defaultdict(list)
for ch in extractor.get_calls(ph, th):
@@ -100,11 +103,11 @@ def find_process_capabilities(
process_features: FeatureSet = collections.defaultdict(set)
# matches found at the basic threads.
# might be found at different threads, that's ok.
# might be found at different threads, thats ok.
thread_matches: MatchResults = collections.defaultdict(list)
# matches found at the call scope.
# might be found at different calls, that's ok.
# might be found at different calls, thats ok.
call_matches: MatchResults = collections.defaultdict(list)
for th in extractor.get_threads(ph):
@@ -135,30 +138,33 @@ def find_dynamic_capabilities(
feature_counts = rdoc.DynamicFeatureCounts(file=0, processes=())
assert isinstance(extractor, DynamicFeatureExtractor)
processes: List[ProcessHandle] = list(extractor.get_processes())
n_processes: int = len(processes)
with redirecting_print_to_tqdm(disable_progress):
with tqdm.contrib.logging.logging_redirect_tqdm():
pbar = tqdm.tqdm
if disable_progress:
# do not use tqdm to avoid unnecessary side effects when caller intends
# to disable progress completely
def pbar(s, *args, **kwargs):
return s
with capa.helpers.CapaProgressBar(
console=capa.helpers.log_console, transient=True, disable=disable_progress
) as pbar:
task = pbar.add_task("matching", total=n_processes, unit="processes")
for p in processes:
process_matches, thread_matches, call_matches, feature_count = find_process_capabilities(
ruleset, extractor, p
)
feature_counts.processes += (
rdoc.ProcessFeatureCount(address=frz.Address.from_capa(p.address), count=feature_count),
)
logger.debug("analyzed %s and extracted %d features", p.address, feature_count)
processes = list(extractor.get_processes())
for rule_name, res in process_matches.items():
all_process_matches[rule_name].extend(res)
for rule_name, res in thread_matches.items():
all_thread_matches[rule_name].extend(res)
for rule_name, res in call_matches.items():
all_call_matches[rule_name].extend(res)
pb = pbar(processes, desc="matching", unit=" processes", leave=False)
for p in pb:
process_matches, thread_matches, call_matches, feature_count = find_process_capabilities(
ruleset, extractor, p
)
feature_counts.processes += (
rdoc.ProcessFeatureCount(address=frz.Address.from_capa(p.address), count=feature_count),
)
logger.debug("analyzed %s and extracted %d features", p.address, feature_count)
pbar.advance(task)
for rule_name, res in process_matches.items():
all_process_matches[rule_name].extend(res)
for rule_name, res in thread_matches.items():
all_thread_matches[rule_name].extend(res)
for rule_name, res in call_matches.items():
all_call_matches[rule_name].extend(res)
# collection of features that captures the rule matches within process and thread scopes.
# mapping from feature (matched rule) to set of addresses at which it matched.
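Both sides of this hunk report progress over the process list, just with different toolkits: one drives a `CapaProgressBar` (a rich-style API with `add_task`/`advance`), the other wraps the iterable in tqdm and substitutes an identity function when progress is disabled. A condensed sketch of the tqdm-side pattern:

```python
import tqdm

def get_progress_renderer(disable_progress: bool):
    if disable_progress:
        # identity function: the loop body stays unchanged,
        # but nothing is rendered to the terminal
        def pbar(iterable, *args, **kwargs):
            return iterable
        return pbar
    return tqdm.tqdm

# usage: wrap the work list with whichever renderer was selected
for process in get_progress_renderer(disable_progress=True)(range(3), desc="matching"):
    pass
```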


@@ -10,14 +10,16 @@ import time
import logging
import itertools
import collections
from typing import Any, List, Tuple
from typing import Any, Tuple
import tqdm.contrib.logging
import capa.perf
import capa.helpers
import capa.features.freeze as frz
import capa.render.result_document as rdoc
from capa.rules import Scope, RuleSet
from capa.engine import FeatureSet, MatchResults
from capa.helpers import redirecting_print_to_tqdm
from capa.capabilities.common import find_file_capabilities
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle, StaticFeatureExtractor
@@ -64,7 +66,7 @@ def find_basic_block_capabilities(
features: FeatureSet = collections.defaultdict(set)
# matches found at the instruction scope.
# might be found at different instructions, that's ok.
# might be found at different instructions, thats ok.
insn_matches: MatchResults = collections.defaultdict(list)
for insn in extractor.get_instructions(f, bb):
@@ -104,11 +106,11 @@ def find_code_capabilities(
function_features: FeatureSet = collections.defaultdict(set)
# matches found at the basic block scope.
# might be found at different basic blocks, that's ok.
# might be found at different basic blocks, thats ok.
bb_matches: MatchResults = collections.defaultdict(list)
# matches found at the instruction scope.
# might be found at different instructions, that's ok.
# might be found at different instructions, thats ok.
insn_matches: MatchResults = collections.defaultdict(list)
for bb in extractor.get_basic_blocks(fh):
@@ -140,58 +142,63 @@ def find_static_capabilities(
library_functions: Tuple[rdoc.LibraryFunction, ...] = ()
assert isinstance(extractor, StaticFeatureExtractor)
functions: List[FunctionHandle] = list(extractor.get_functions())
n_funcs: int = len(functions)
n_libs: int = 0
percentage: float = 0
with redirecting_print_to_tqdm(disable_progress):
with tqdm.contrib.logging.logging_redirect_tqdm():
pbar = tqdm.tqdm
if capa.helpers.is_runtime_ghidra():
# Ghidrathon interpreter cannot properly handle
# the TMonitor thread that is created via a monitor_interval
# > 0
pbar.monitor_interval = 0
if disable_progress:
# do not use tqdm to avoid unnecessary side effects when caller intends
# to disable progress completely
def pbar(s, *args, **kwargs):
return s
with capa.helpers.CapaProgressBar(
console=capa.helpers.log_console, transient=True, disable=disable_progress
) as pbar:
task = pbar.add_task(
"matching", total=n_funcs, unit="functions", postfix=f"skipped {n_libs} library functions, {percentage}%"
)
for f in functions:
t0 = time.time()
if extractor.is_library_function(f.address):
function_name = extractor.get_function_name(f.address)
logger.debug("skipping library function 0x%x (%s)", f.address, function_name)
library_functions += (
rdoc.LibraryFunction(address=frz.Address.from_capa(f.address), name=function_name),
functions = list(extractor.get_functions())
n_funcs = len(functions)
pb = pbar(functions, desc="matching", unit=" functions", postfix="skipped 0 library functions", leave=False)
for f in pb:
t0 = time.time()
if extractor.is_library_function(f.address):
function_name = extractor.get_function_name(f.address)
logger.debug("skipping library function 0x%x (%s)", f.address, function_name)
library_functions += (
rdoc.LibraryFunction(address=frz.Address.from_capa(f.address), name=function_name),
)
n_libs = len(library_functions)
percentage = round(100 * (n_libs / n_funcs))
if isinstance(pb, tqdm.tqdm):
pb.set_postfix_str(f"skipped {n_libs} library functions ({percentage}%)")
continue
function_matches, bb_matches, insn_matches, feature_count = find_code_capabilities(
ruleset, extractor, f
)
n_libs = len(library_functions)
percentage = round(100 * (n_libs / n_funcs))
pbar.update(task, postfix=f"skipped {n_libs} library functions, {percentage}%")
pbar.advance(task)
continue
feature_counts.functions += (
rdoc.FunctionFeatureCount(address=frz.Address.from_capa(f.address), count=feature_count),
)
t1 = time.time()
function_matches, bb_matches, insn_matches, feature_count = find_code_capabilities(ruleset, extractor, f)
feature_counts.functions += (
rdoc.FunctionFeatureCount(address=frz.Address.from_capa(f.address), count=feature_count),
)
t1 = time.time()
match_count = sum(len(res) for res in function_matches.values())
match_count += sum(len(res) for res in bb_matches.values())
match_count += sum(len(res) for res in insn_matches.values())
logger.debug(
"analyzed function 0x%x and extracted %d features, %d matches in %0.02fs",
f.address,
feature_count,
match_count,
t1 - t0,
)
match_count = 0
for name, matches_ in itertools.chain(function_matches.items(), bb_matches.items(), insn_matches.items()):
if not ruleset.rules[name].is_subscope_rule():
match_count += len(matches_)
logger.debug(
"analyzed function 0x%x and extracted %d features, %d matches in %0.02fs",
f.address,
feature_count,
match_count,
t1 - t0,
)
for rule_name, res in function_matches.items():
all_function_matches[rule_name].extend(res)
for rule_name, res in bb_matches.items():
all_bb_matches[rule_name].extend(res)
for rule_name, res in insn_matches.items():
all_insn_matches[rule_name].extend(res)
pbar.advance(task)
for rule_name, res in function_matches.items():
all_function_matches[rule_name].extend(res)
for rule_name, res in bb_matches.items():
all_bb_matches[rule_name].extend(res)
for rule_name, res in insn_matches.items():
all_insn_matches[rule_name].extend(res)
# collection of features that captures the rule matches within function, BB, and instruction scopes.
# mapping from feature (matched rule) to set of addresses at which it matched.
@@ -206,7 +213,7 @@ def find_static_capabilities(
all_file_matches, feature_count = find_file_capabilities(ruleset, extractor, function_and_lower_features)
feature_counts.file = feature_count
matches: MatchResults = dict(
matches = dict(
itertools.chain(
# each rule exists in exactly one scope,
# so there won't be any overlap among these following MatchResults,
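One substantive difference buried in this hunk: one variant tallies every match per function, while the other excludes subscope rules (internal helper rules that capa derives from nested scope blocks) from the count. A sketch of the filtering version, assuming `ruleset.rules` maps rule names to objects exposing `is_subscope_rule()`:

```python
import itertools

def count_reportable_matches(ruleset, function_matches, bb_matches, insn_matches) -> int:
    # tally matches across scopes, skipping internal subscope rules
    count = 0
    for name, matches in itertools.chain(
        function_matches.items(), bb_matches.items(), insn_matches.items()
    ):
        if not ruleset.rules[name].is_subscope_rule():
            count += len(matches)
    return count
```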


@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -102,14 +102,14 @@ class And(Statement):
super().__init__(description=description)
self.children = children
def evaluate(self, features: FeatureSet, short_circuit=True):
def evaluate(self, ctx, short_circuit=True):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature.and"] += 1
if short_circuit:
results = []
for child in self.children:
result = child.evaluate(features, short_circuit=short_circuit)
result = child.evaluate(ctx, short_circuit=short_circuit)
results.append(result)
if not result:
# short circuit
@@ -117,7 +117,7 @@ class And(Statement):
return Result(True, self, results)
else:
results = [child.evaluate(features, short_circuit=short_circuit) for child in self.children]
results = [child.evaluate(ctx, short_circuit=short_circuit) for child in self.children]
success = all(results)
return Result(success, self, results)
@@ -135,14 +135,14 @@ class Or(Statement):
super().__init__(description=description)
self.children = children
def evaluate(self, features: FeatureSet, short_circuit=True):
def evaluate(self, ctx, short_circuit=True):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature.or"] += 1
if short_circuit:
results = []
for child in self.children:
result = child.evaluate(features, short_circuit=short_circuit)
result = child.evaluate(ctx, short_circuit=short_circuit)
results.append(result)
if result:
# short circuit as soon as we hit one match
@@ -150,7 +150,7 @@ class Or(Statement):
return Result(False, self, results)
else:
results = [child.evaluate(features, short_circuit=short_circuit) for child in self.children]
results = [child.evaluate(ctx, short_circuit=short_circuit) for child in self.children]
success = any(results)
return Result(success, self, results)
@@ -162,11 +162,11 @@ class Not(Statement):
super().__init__(description=description)
self.child = child
def evaluate(self, features: FeatureSet, short_circuit=True):
def evaluate(self, ctx, short_circuit=True):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature.not"] += 1
results = [self.child.evaluate(features, short_circuit=short_circuit)]
results = [self.child.evaluate(ctx, short_circuit=short_circuit)]
success = not results[0]
return Result(success, self, results)
@@ -185,7 +185,7 @@ class Some(Statement):
self.count = count
self.children = children
def evaluate(self, features: FeatureSet, short_circuit=True):
def evaluate(self, ctx, short_circuit=True):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature.some"] += 1
@@ -193,7 +193,7 @@ class Some(Statement):
results = []
satisfied_children_count = 0
for child in self.children:
result = child.evaluate(features, short_circuit=short_circuit)
result = child.evaluate(ctx, short_circuit=short_circuit)
results.append(result)
if result:
satisfied_children_count += 1
@@ -204,7 +204,7 @@ class Some(Statement):
return Result(False, self, results)
else:
results = [child.evaluate(features, short_circuit=short_circuit) for child in self.children]
results = [child.evaluate(ctx, short_circuit=short_circuit) for child in self.children]
# note that here we cast the child result as a bool
# because we've overridden `__bool__` above.
#
@@ -214,7 +214,7 @@ class Some(Statement):
class Range(Statement):
"""match if the child is contained in the feature set with a count in the given range."""
"""match if the child is contained in the ctx set with a count in the given range."""
def __init__(self, child, min=None, max=None, description=None):
super().__init__(description=description)
@@ -222,15 +222,15 @@ class Range(Statement):
self.min = min if min is not None else 0
self.max = max if max is not None else (1 << 64 - 1)
def evaluate(self, features: FeatureSet, short_circuit=True):
def evaluate(self, ctx, **kwargs):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature.range"] += 1
count = len(features.get(self.child, []))
count = len(ctx.get(self.child, []))
if self.min == 0 and count == 0:
return Result(True, self, [])
return Result(self.min <= count <= self.max, self, [], locations=features.get(self.child))
return Result(self.min <= count <= self.max, self, [], locations=ctx.get(self.child))
def __str__(self):
if self.max == (1 << 64 - 1):
@@ -250,7 +250,7 @@ class Subscope(Statement):
self.scope = scope
self.child = child
def evaluate(self, features: FeatureSet, short_circuit=True):
def evaluate(self, ctx, **kwargs):
raise ValueError("cannot evaluate a subscope directly!")
@@ -270,14 +270,6 @@ class Subscope(Statement):
MatchResults = Mapping[str, List[Tuple[Address, Result]]]
def get_rule_namespaces(rule: "capa.rules.Rule") -> Iterator[str]:
namespace = rule.meta.get("namespace")
if namespace:
while namespace:
yield namespace
namespace, _, _ = namespace.rpartition("/")
def index_rule_matches(features: FeatureSet, rule: "capa.rules.Rule", locations: Iterable[Address]):
"""
record into the given featureset that the given rule matched at the given locations.
@@ -288,8 +280,11 @@ def index_rule_matches(features: FeatureSet, rule: "capa.rules.Rule", locations:
updates `features` in-place. doesn't modify the remaining arguments.
"""
features[capa.features.common.MatchedRule(rule.name)].update(locations)
for namespace in get_rule_namespaces(rule):
features[capa.features.common.MatchedRule(namespace)].update(locations)
namespace = rule.meta.get("namespace")
if namespace:
while namespace:
features[capa.features.common.MatchedRule(namespace)].update(locations)
namespace, _, _ = namespace.rpartition("/")
def match(rules: List["capa.rules.Rule"], features: FeatureSet, addr: Address) -> Tuple[FeatureSet, MatchResults]:
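The `index_rule_matches` change is a pure refactor: the inline while-loop over `rpartition` becomes the `get_rule_namespaces` generator, and both record a match under the rule's namespace plus every ancestor namespace. A minimal sketch of that expansion:

```python
def expand_namespaces(namespace: str):
    # "a/b/c" yields "a/b/c", "a/b", "a": rpartition("/") strips
    # one path component per iteration until the string is empty
    while namespace:
        yield namespace
        namespace, _, _ = namespace.rpartition("/")

assert list(expand_namespaces("communication/http/client")) == [
    "communication/http/client",
    "communication/http",
    "communication",
]
```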


@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -23,15 +23,3 @@ class UnsupportedOSError(ValueError):
class EmptyReportError(ValueError):
pass
class InvalidArgument(ValueError):
pass
class NonExistantFunctionError(ValueError):
pass
class NonExistantProcessError(ValueError):
pass


@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -10,7 +10,8 @@ import abc
class Address(abc.ABC):
@abc.abstractmethod
def __eq__(self, other): ...
def __eq__(self, other):
...
@abc.abstractmethod
def __lt__(self, other):
@@ -93,7 +94,7 @@ class ThreadAddress(Address):
class DynamicCallAddress(Address):
"""addresses a call in a dynamic execution trace"""
"""addesses a call in a dynamic execution trace"""
def __init__(self, thread: ThreadAddress, id: int):
assert id >= 0


@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt


@@ -1,4 +1,4 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt


@@ -1,4 +1,4 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt


@@ -1,4 +1,4 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt


@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -128,7 +128,7 @@ class Feature(abc.ABC): # noqa: B024
def __lt__(self, other):
# implementing sorting by serializing to JSON is a huge hack.
# it's slow, inelegant, and probably doesn't work intuitively;
# its slow, inelegant, and probably doesn't work intuitively;
# however, we only use it for deterministic output, so it's good enough for now.
# circular import
@@ -166,10 +166,10 @@ class Feature(abc.ABC): # noqa: B024
def __repr__(self):
return str(self)
def evaluate(self, features: "capa.engine.FeatureSet", short_circuit=True) -> Result:
def evaluate(self, ctx: Dict["Feature", Set[Address]], **kwargs) -> Result:
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature." + self.name] += 1
return Result(self in features, self, [], locations=features.get(self, set()))
return Result(self in ctx, self, [], locations=ctx.get(self, set()))
class MatchedRule(Feature):
@@ -207,7 +207,7 @@ class Substring(String):
super().__init__(value, description=description)
self.value = value
def evaluate(self, features: "capa.engine.FeatureSet", short_circuit=True):
def evaluate(self, ctx, short_circuit=True):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature.substring"] += 1
@@ -216,7 +216,7 @@ class Substring(String):
matches: typing.DefaultDict[str, Set[Address]] = collections.defaultdict(set)
assert isinstance(self.value, str)
for feature, locations in features.items():
for feature, locations in ctx.items():
if not isinstance(feature, (String,)):
continue
@@ -227,7 +227,7 @@ class Substring(String):
if self.value in feature.value:
matches[feature.value].update(locations)
if short_circuit:
# we found one matching string, that's sufficient to match.
# we found one matching string, thats sufficient to match.
# don't collect other matching strings in this mode.
break
@@ -299,7 +299,7 @@ class Regex(String):
f"invalid regular expression: {value} it should use Python syntax, try it at https://pythex.org"
) from exc
def evaluate(self, features: "capa.engine.FeatureSet", short_circuit=True):
def evaluate(self, ctx, short_circuit=True):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature.regex"] += 1
@@ -307,7 +307,7 @@ class Regex(String):
# will unique the locations later on.
matches: typing.DefaultDict[str, Set[Address]] = collections.defaultdict(set)
for feature, locations in features.items():
for feature, locations in ctx.items():
if not isinstance(feature, (String,)):
continue
@@ -322,7 +322,7 @@ class Regex(String):
if self.re.search(feature.value):
matches[feature.value].update(locations)
if short_circuit:
# we found one matching string, that's sufficient to match.
# we found one matching string, thats sufficient to match.
# don't collect other matching strings in this mode.
break
@@ -384,14 +384,12 @@ class Bytes(Feature):
super().__init__(value, description=description)
self.value = value
def evaluate(self, features: "capa.engine.FeatureSet", short_circuit=True):
assert isinstance(self.value, bytes)
def evaluate(self, ctx, **kwargs):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature.bytes"] += 1
capa.perf.counters["evaluate.feature.bytes." + str(len(self.value))] += 1
for feature, locations in features.items():
assert isinstance(self.value, bytes)
for feature, locations in ctx.items():
if not isinstance(feature, (Bytes,)):
continue
@@ -409,10 +407,9 @@ class Bytes(Feature):
# other candidates here: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#machine-types
ARCH_I386 = "i386"
ARCH_AMD64 = "amd64"
ARCH_AARCH64 = "aarch64"
# dotnet
ARCH_ANY = "any"
VALID_ARCH = (ARCH_I386, ARCH_AMD64, ARCH_AARCH64, ARCH_ANY)
VALID_ARCH = (ARCH_I386, ARCH_AMD64, ARCH_ANY)
class Arch(Feature):
@@ -424,11 +421,10 @@ class Arch(Feature):
OS_WINDOWS = "windows"
OS_LINUX = "linux"
OS_MACOS = "macos"
OS_ANDROID = "android"
# dotnet
OS_ANY = "any"
VALID_OS = {os.value for os in capa.features.extractors.elf.OS}
VALID_OS.update({OS_WINDOWS, OS_LINUX, OS_MACOS, OS_ANY, OS_ANDROID})
VALID_OS.update({OS_WINDOWS, OS_LINUX, OS_MACOS, OS_ANY})
# internal only, not to be used in rules
OS_AUTO = "auto"
@@ -438,11 +434,11 @@ class OS(Feature):
super().__init__(value, description=description)
self.name = "os"
def evaluate(self, features: "capa.engine.FeatureSet", short_circuit=True):
def evaluate(self, ctx, **kwargs):
capa.perf.counters["evaluate.feature"] += 1
capa.perf.counters["evaluate.feature." + self.name] += 1
for feature, locations in features.items():
for feature, locations in ctx.items():
if not isinstance(feature, (OS,)):
continue
@@ -462,28 +458,18 @@ FORMAT_AUTO = "auto"
FORMAT_SC32 = "sc32"
FORMAT_SC64 = "sc64"
FORMAT_CAPE = "cape"
FORMAT_DRAKVUF = "drakvuf"
FORMAT_VMRAY = "vmray"
FORMAT_BINEXPORT2 = "binexport2"
FORMAT_FREEZE = "freeze"
FORMAT_RESULT = "result"
STATIC_FORMATS = {
FORMAT_SC32,
FORMAT_SC64,
FORMAT_PE,
FORMAT_ELF,
FORMAT_DOTNET,
FORMAT_FREEZE,
FORMAT_RESULT,
FORMAT_BINEXPORT2,
}
DYNAMIC_FORMATS = {
FORMAT_CAPE,
FORMAT_DRAKVUF,
FORMAT_VMRAY,
FORMAT_FREEZE,
FORMAT_RESULT,
}
FORMAT_FREEZE = "freeze"
FORMAT_RESULT = "result"
FORMAT_UNKNOWN = "unknown"
@@ -496,6 +482,6 @@ class Format(Feature):
def is_global_feature(feature):
"""
is this a feature that is extracted at every scope?
today, these are OS, arch, and format features.
today, these are OS and arch features.
"""
return isinstance(feature, (OS, Arch, Format))
return isinstance(feature, (OS, Arch))
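
Aside: both sides of the `evaluate` signature change above take the same shape of input, a mapping from observed features to the set of addresses where each was seen. A minimal sketch of that contract, using made-up feature values and addresses:

    from capa.features.insn import Number
    from capa.features.common import String, Substring

    # hypothetical observed features: Feature -> set of addresses
    features = {
        String("mozilla/5.0"): {0x401000},
        Number(0x40): {0x401004, 0x401008},
    }

    # a plain feature matches by dictionary membership and reports its locations...
    assert Number(0x40).evaluate(features).success

    # ...while Substring scans every String feature, stopping at the first hit
    # because short_circuit defaults to True (see the hunk above).
    assert Substring("mozilla").evaluate(features).success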

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -9,9 +9,7 @@
import abc
import hashlib
import dataclasses
from copy import copy
from types import MethodType
from typing import Any, Set, Dict, Tuple, Union, Iterator
from typing import Any, Dict, Tuple, Union, Iterator
from dataclasses import dataclass
# TODO(williballenthin): use typing.TypeAlias directly when Python 3.9 is deprecated
@@ -77,7 +75,7 @@ class BBHandle:
@dataclass
class InsnHandle:
"""reference to an instruction recognized by a feature extractor.
"""reference to a instruction recognized by a feature extractor.
Attributes:
address: the address of the instruction.
@@ -298,22 +296,6 @@ class StaticFeatureExtractor:
raise NotImplementedError()
def FunctionFilter(extractor: StaticFeatureExtractor, functions: Set) -> StaticFeatureExtractor:
original_get_functions = extractor.get_functions
def filtered_get_functions(self):
yield from (f for f in original_get_functions() if f.address in functions)
# we make a copy of the original extractor object and then update its get_functions() method with the decorated filter one.
# this is in order to preserve the original extractor object's get_functions() method, in case it is used elsewhere in the code.
# an example where this is important is in our testfiles where we may use the same extractor object with different tests,
# with some of these tests needing to install a functions filter on the extractor object.
new_extractor = copy(extractor)
new_extractor.get_functions = MethodType(filtered_get_functions, extractor) # type: ignore
return new_extractor
@dataclass
class ProcessHandle:
"""
@@ -485,20 +467,4 @@ class DynamicFeatureExtractor:
raise NotImplementedError()
def ProcessFilter(extractor: DynamicFeatureExtractor, processes: Set) -> DynamicFeatureExtractor:
original_get_processes = extractor.get_processes
def filtered_get_processes(self):
yield from (f for f in original_get_processes() if f.address.pid in processes)
# we make a copy of the original extractor object and then update its get_processes() method with the decorated filter one.
# this is in order to preserve the original extractor object's get_processes() method, in case it is used elsewhere in the code.
# an example where this is important is in our testfiles where we may use the same extractor object with different tests,
# with some of these tests needing to install a processes filter on the extractor object.
new_extractor = copy(extractor)
new_extractor.get_processes = MethodType(filtered_get_processes, extractor) # type: ignore
return new_extractor
FeatureExtractor: TypeAlias = Union[StaticFeatureExtractor, DynamicFeatureExtractor]
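
The removed FunctionFilter/ProcessFilter helpers above share one trick worth noting: shallow-copy the extractor, then rebind a single method with types.MethodType so the original instance keeps its unfiltered behavior. A standalone sketch of the pattern (the class and names here are illustrative, not capa APIs):

    from copy import copy
    from types import MethodType

    class Extractor:
        def get_items(self):
            yield from (1, 2, 3, 4)

    def item_filter(extractor: Extractor, wanted: set) -> Extractor:
        original_get_items = extractor.get_items

        def filtered_get_items(self):
            # closure over the original bound method,
            # so the copy delegates to the untouched instance.
            yield from (i for i in original_get_items() if i in wanted)

        new_extractor = copy(extractor)
        new_extractor.get_items = MethodType(filtered_get_items, extractor)
        return new_extractor

    assert list(item_filter(Extractor(), {2, 4}).get_items()) == [2, 4]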

View File

@@ -1,416 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Proto files generated via protobuf v24.4:
protoc --python_out=. --mypy_out=. binexport2.proto
from BinExport2 at 6916731d5f6693c4a4f0a052501fd3bd92cfd08b
https://github.com/google/binexport/blob/6916731/binexport2.proto
"""
import io
import hashlib
import logging
import contextlib
from typing import Set, Dict, List, Tuple, Iterator
from pathlib import Path
from collections import defaultdict
from dataclasses import dataclass
from pefile import PE
from elftools.elf.elffile import ELFFile
import capa.features.common
import capa.features.extractors.common
import capa.features.extractors.binexport2.helpers
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger(__name__)
def get_binexport2(sample: Path) -> BinExport2:
be2: BinExport2 = BinExport2()
be2.ParseFromString(sample.read_bytes())
return be2
def compute_common_prefix_length(m: str, n: str) -> int:
# ensure m is the shorter string, i.e. len(m) <= len(n)
if len(n) < len(m):
m, n = n, m
for i, c in enumerate(m):
if n[i] != c:
return i
return len(m)
def get_sample_from_binexport2(input_file: Path, be2: BinExport2, search_paths: List[Path]) -> Path:
"""attempt to find the sample file, given a BinExport2 file.
searches in the same directory as the BinExport2 file, and then in search_paths.
"""
def filename_similarity_key(p: Path) -> Tuple[int, str]:
# note closure over input_file.
# sort first by length of common prefix, then by name (for stability)
return (compute_common_prefix_length(p.name, input_file.name), p.name)
wanted_sha256: str = be2.meta_information.executable_id.lower()
input_directory: Path = input_file.parent
siblings: List[Path] = [p for p in input_directory.iterdir() if p.is_file()]
siblings.sort(key=filename_similarity_key, reverse=True)
for sibling in siblings:
# e.g. files held open by IDA in the same directory may raise PermissionError on Windows
with contextlib.suppress(PermissionError):
if hashlib.sha256(sibling.read_bytes()).hexdigest().lower() == wanted_sha256:
return sibling
for search_path in search_paths:
candidates: List[Path] = [p for p in search_path.iterdir() if p.is_file()]
candidates.sort(key=filename_similarity_key, reverse=True)
for candidate in candidates:
with contextlib.suppress(PermissionError):
if hashlib.sha256(candidate.read_bytes()).hexdigest().lower() == wanted_sha256:
return candidate
raise ValueError("cannot find sample, you may specify the path using the CAPA_SAMPLES_DIR environment variable")
class BinExport2Index:
def __init__(self, be2: BinExport2):
self.be2: BinExport2 = be2
self.callers_by_vertex_index: Dict[int, List[int]] = defaultdict(list)
self.callees_by_vertex_index: Dict[int, List[int]] = defaultdict(list)
# note: flow graph != call graph (vertex)
self.flow_graph_index_by_address: Dict[int, int] = {}
self.flow_graph_address_by_index: Dict[int, int] = {}
# edges that come from the given basic block
self.source_edges_by_basic_block_index: Dict[int, List[BinExport2.FlowGraph.Edge]] = defaultdict(list)
# edges that end up at the given basic block
self.target_edges_by_basic_block_index: Dict[int, List[BinExport2.FlowGraph.Edge]] = defaultdict(list)
self.vertex_index_by_address: Dict[int, int] = {}
self.data_reference_index_by_source_instruction_index: Dict[int, List[int]] = defaultdict(list)
self.data_reference_index_by_target_address: Dict[int, List[int]] = defaultdict(list)
self.string_reference_index_by_source_instruction_index: Dict[int, List[int]] = defaultdict(list)
self.insn_address_by_index: Dict[int, int] = {}
self.insn_index_by_address: Dict[int, int] = {}
self.insn_by_address: Dict[int, BinExport2.Instruction] = {}
# must index instructions first
self._index_insn_addresses()
self._index_vertex_edges()
self._index_flow_graph_nodes()
self._index_flow_graph_edges()
self._index_call_graph_vertices()
self._index_data_references()
self._index_string_references()
def get_insn_address(self, insn_index: int) -> int:
assert insn_index in self.insn_address_by_index, f"insn must be indexed, missing {insn_index}"
return self.insn_address_by_index[insn_index]
def get_basic_block_address(self, basic_block_index: int) -> int:
basic_block: BinExport2.BasicBlock = self.be2.basic_block[basic_block_index]
first_instruction_index: int = next(self.instruction_indices(basic_block))
return self.get_insn_address(first_instruction_index)
def _index_vertex_edges(self):
for edge in self.be2.call_graph.edge:
if not edge.source_vertex_index:
continue
if not edge.target_vertex_index:
continue
self.callers_by_vertex_index[edge.target_vertex_index].append(edge.source_vertex_index)
self.callees_by_vertex_index[edge.source_vertex_index].append(edge.target_vertex_index)
def _index_flow_graph_nodes(self):
for flow_graph_index, flow_graph in enumerate(self.be2.flow_graph):
function_address: int = self.get_basic_block_address(flow_graph.entry_basic_block_index)
self.flow_graph_index_by_address[function_address] = flow_graph_index
self.flow_graph_address_by_index[flow_graph_index] = function_address
def _index_flow_graph_edges(self):
for flow_graph in self.be2.flow_graph:
for edge in flow_graph.edge:
if not edge.HasField("source_basic_block_index") or not edge.HasField("target_basic_block_index"):
continue
self.source_edges_by_basic_block_index[edge.source_basic_block_index].append(edge)
self.target_edges_by_basic_block_index[edge.target_basic_block_index].append(edge)
def _index_call_graph_vertices(self):
for vertex_index, vertex in enumerate(self.be2.call_graph.vertex):
if not vertex.HasField("address"):
continue
vertex_address: int = vertex.address
self.vertex_index_by_address[vertex_address] = vertex_index
def _index_data_references(self):
for data_reference_index, data_reference in enumerate(self.be2.data_reference):
self.data_reference_index_by_source_instruction_index[data_reference.instruction_index].append(
data_reference_index
)
self.data_reference_index_by_target_address[data_reference.address].append(data_reference_index)
def _index_string_references(self):
for string_reference_index, string_reference in enumerate(self.be2.string_reference):
self.string_reference_index_by_source_instruction_index[string_reference.instruction_index].append(
string_reference_index
)
def _index_insn_addresses(self):
# see https://github.com/google/binexport/blob/39f6445c232bb5caf5c4a2a996de91dfa20c48e8/binexport.cc#L45
if len(self.be2.instruction) == 0:
return
assert self.be2.instruction[0].HasField("address"), "first insn must have explicit address"
addr: int = 0
next_addr: int = 0
for idx, insn in enumerate(self.be2.instruction):
if insn.HasField("address"):
addr = insn.address
next_addr = addr + len(insn.raw_bytes)
else:
addr = next_addr
next_addr += len(insn.raw_bytes)
self.insn_address_by_index[idx] = addr
self.insn_index_by_address[addr] = idx
self.insn_by_address[addr] = insn
@staticmethod
def instruction_indices(basic_block: BinExport2.BasicBlock) -> Iterator[int]:
"""
For a given basic block, enumerate the instruction indices.
"""
for index_range in basic_block.instruction_index:
if not index_range.HasField("end_index"):
yield index_range.begin_index
continue
else:
yield from range(index_range.begin_index, index_range.end_index)
def basic_block_instructions(
self, basic_block: BinExport2.BasicBlock
) -> Iterator[Tuple[int, BinExport2.Instruction, int]]:
"""
For a given basic block, enumerate the instruction indices,
the instruction instances, and their addresses.
"""
for instruction_index in self.instruction_indices(basic_block):
instruction: BinExport2.Instruction = self.be2.instruction[instruction_index]
instruction_address: int = self.get_insn_address(instruction_index)
yield instruction_index, instruction, instruction_address
def get_function_name_by_vertex(self, vertex_index: int) -> str:
vertex: BinExport2.CallGraph.Vertex = self.be2.call_graph.vertex[vertex_index]
name: str = f"sub_{vertex.address:x}"
if vertex.HasField("mangled_name"):
name = vertex.mangled_name
if vertex.HasField("demangled_name"):
name = vertex.demangled_name
if vertex.HasField("library_index"):
library: BinExport2.Library = self.be2.library[vertex.library_index]
if library.HasField("name"):
name = f"{library.name}!{name}"
return name
def get_function_name_by_address(self, address: int) -> str:
if address not in self.vertex_index_by_address:
return ""
vertex_index: int = self.vertex_index_by_address[address]
return self.get_function_name_by_vertex(vertex_index)
def get_instruction_by_address(self, address: int) -> BinExport2.Instruction:
assert address in self.insn_by_address, f"address must be indexed, missing {address:x}"
return self.insn_by_address[address]
class BinExport2Analysis:
def __init__(self, be2: BinExport2, idx: BinExport2Index, buf: bytes):
self.be2: BinExport2 = be2
self.idx: BinExport2Index = idx
self.buf: bytes = buf
self.base_address: int = 0
self.thunks: Dict[int, int] = {}
self._find_base_address()
self._compute_thunks()
def _find_base_address(self):
sections_with_perms: Iterator[BinExport2.Section] = filter(
lambda s: s.flag_r or s.flag_w or s.flag_x, self.be2.section
)
# assume the lowest address is the base address.
# this works as long as BinExport doesn't record other
# libraries mapped into memory.
self.base_address = min(s.address for s in sections_with_perms)
logger.debug("found base address: %x", self.base_address)
def _compute_thunks(self):
for addr, idx in self.idx.vertex_index_by_address.items():
vertex: BinExport2.CallGraph.Vertex = self.be2.call_graph.vertex[idx]
if not capa.features.extractors.binexport2.helpers.is_vertex_type(
vertex, BinExport2.CallGraph.Vertex.Type.THUNK
):
continue
curr_idx: int = idx
for _ in range(capa.features.common.THUNK_CHAIN_DEPTH_DELTA):
thunk_callees: List[int] = self.idx.callees_by_vertex_index[curr_idx]
# if this doesn't hold, then this probably isn't a thunk,
# because the number of callees is either:
#  0, and the thunk doesn't point to anything, or
#  >1, and the thunk may end up at many functions.
assert len(thunk_callees) == 1, f"thunk @ {hex(addr)} failed"
thunked_idx: int = thunk_callees[0]
thunked_vertex: BinExport2.CallGraph.Vertex = self.be2.call_graph.vertex[thunked_idx]
if not capa.features.extractors.binexport2.helpers.is_vertex_type(
thunked_vertex, BinExport2.CallGraph.Vertex.Type.THUNK
):
assert thunked_vertex.HasField("address")
self.thunks[addr] = thunked_vertex.address
break
curr_idx = thunked_idx
@dataclass
class MemoryRegion:
# location of the bytes, potentially relative to a base address
address: int
buf: bytes
@property
def end(self) -> int:
return self.address + len(self.buf)
def contains(self, address: int) -> bool:
# note: address must be relative to any base address
return self.address <= address < self.end
class ReadMemoryError(ValueError): ...
class AddressNotMappedError(ReadMemoryError): ...
@dataclass
class AddressSpace:
base_address: int
memory_regions: Tuple[MemoryRegion, ...]
def read_memory(self, address: int, length: int) -> bytes:
rva: int = address - self.base_address
for region in self.memory_regions:
if region.contains(rva):
offset: int = rva - region.address
return region.buf[offset : offset + length]
raise AddressNotMappedError(address)
@classmethod
def from_pe(cls, pe: PE, base_address: int):
regions: List[MemoryRegion] = []
for section in pe.sections:
address: int = section.VirtualAddress
size: int = section.Misc_VirtualSize
buf: bytes = section.get_data()
if len(buf) != size:
# pad the section with NULLs
# assume page alignment is already handled.
# might need more hardening here.
buf += b"\x00" * (size - len(buf))
regions.append(MemoryRegion(address, buf))
return cls(base_address, tuple(regions))
@classmethod
def from_elf(cls, elf: ELFFile, base_address: int):
regions: List[MemoryRegion] = []
# ELF segments are for runtime data,
# ELF sections are for link-time data.
for segment in elf.iter_segments():
# assume p_align is consistent with addresses here.
# otherwise, should harden this loader.
segment_rva: int = segment.header.p_vaddr
segment_size: int = segment.header.p_memsz
segment_data: bytes = segment.data()
if len(segment_data) < segment_size:
# pad the section with NULLs
# assume page alignment is already handled.
# might need more hardening here.
segment_data += b"\x00" * (segment_size - len(segment_data))
regions.append(MemoryRegion(segment_rva, segment_data))
return cls(base_address, tuple(regions))
@classmethod
def from_buf(cls, buf: bytes, base_address: int):
if buf.startswith(capa.features.extractors.common.MATCH_PE):
pe: PE = PE(data=buf)
return cls.from_pe(pe, base_address)
elif buf.startswith(capa.features.extractors.common.MATCH_ELF):
elf: ELFFile = ELFFile(io.BytesIO(buf))
return cls.from_elf(elf, base_address)
else:
raise NotImplementedError("file format address space")
@dataclass
class AnalysisContext:
sample_bytes: bytes
be2: BinExport2
idx: BinExport2Index
analysis: BinExport2Analysis
address_space: AddressSpace
@dataclass
class FunctionContext:
ctx: AnalysisContext
flow_graph_index: int
format: Set[str]
os: Set[str]
arch: Set[str]
@dataclass
class BasicBlockContext:
basic_block_index: int
@dataclass
class InstructionContext:
instruction_index: int
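
A note on _index_insn_addresses above: BinExport2 writes an explicit address only for instructions that do not flow directly from their predecessor, so every other address is recovered by accumulating raw_bytes lengths. A self-contained sketch of that accumulation over plain tuples (hypothetical x86 bytes, no protobuf involved):

    from typing import Dict, List, Optional, Tuple

    # (address-or-None, raw_bytes) pairs standing in for BinExport2.Instruction;
    # only flow-breaking instructions carry an explicit address.
    instructions: List[Tuple[Optional[int], bytes]] = [
        (0x401000, b"\x55"),      # push ebp      (explicit address)
        (None, b"\x89\xe5"),      # mov ebp, esp  (0x401000 + 1)
        (None, b"\x83\xec\x10"),  # sub esp, 0x10 (0x401001 + 2)
        (0x402000, b"\xc3"),      # ret           (new flow, explicit address)
    ]

    address_by_index: Dict[int, int] = {}
    addr = next_addr = 0
    for idx, (explicit, raw_bytes) in enumerate(instructions):
        addr = explicit if explicit is not None else next_addr
        next_addr = addr + len(raw_bytes)
        address_by_index[idx] = addr

    assert address_by_index == {0: 0x401000, 1: 0x401001, 2: 0x401003, 3: 0x402000}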

View File

@@ -1,15 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
def is_stack_register_expression(be2: BinExport2, expression: BinExport2.Expression) -> bool:
return bool(
expression and expression.type == BinExport2.Expression.REGISTER and expression.symbol.lower().endswith("sp")
)
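
Note that the check above treats any register whose name ends in "sp" as a stack pointer, which matches AArch64 sp as well as its 32-bit alias wsp. A quick illustration, assuming the module as it existed before this change is importable:

    from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
    from capa.features.extractors.binexport2.arch.arm.helpers import is_stack_register_expression

    be2 = BinExport2()  # part of the signature, though unused by the check itself

    sp = BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="sp")
    x0 = BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="x0")

    assert is_stack_register_expression(be2, sp)
    assert not is_stack_register_expression(be2, x0)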

View File

@@ -1,155 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import List, Tuple, Iterator, Optional
import capa.features.extractors.binexport2.helpers
from capa.features.insn import MAX_STRUCTURE_SIZE, Number, Offset, OperandNumber, OperandOffset
from capa.features.common import Feature, Characteristic
from capa.features.address import Address
from capa.features.extractors.binexport2 import FunctionContext, InstructionContext
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle
from capa.features.extractors.binexport2.helpers import (
BinExport2InstructionPatternMatcher,
mask_immediate,
is_address_mapped,
get_instruction_mnemonic,
get_operand_register_expression,
get_operand_immediate_expression,
)
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
from capa.features.extractors.binexport2.arch.arm.helpers import is_stack_register_expression
logger = logging.getLogger(__name__)
def extract_insn_number_features(
fh: FunctionHandle, _bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
instruction_index: int = ii.instruction_index
instruction: BinExport2.Instruction = be2.instruction[instruction_index]
if len(instruction.operand_index) == 0:
# skip things like:
# .text:0040116e leave
return
mnemonic: str = get_instruction_mnemonic(be2, instruction)
if mnemonic in ("add", "sub"):
assert len(instruction.operand_index) == 3
operand1_expression: Optional[BinExport2.Expression] = get_operand_register_expression(
be2, be2.operand[instruction.operand_index[1]]
)
if operand1_expression and is_stack_register_expression(be2, operand1_expression):
# skip things like:
# add x0,sp,#0x8
return
for i, operand_index in enumerate(instruction.operand_index):
operand: BinExport2.Operand = be2.operand[operand_index]
immediate_expression: Optional[BinExport2.Expression] = get_operand_immediate_expression(be2, operand)
if not immediate_expression:
continue
value: int = mask_immediate(fhi.arch, immediate_expression.immediate)
if is_address_mapped(be2, value):
continue
yield Number(value), ih.address
yield OperandNumber(i, value), ih.address
if mnemonic == "add" and i == 2:
if 0 < value < MAX_STRUCTURE_SIZE:
yield Offset(value), ih.address
yield OperandOffset(i, value), ih.address
OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack), #int] ; capture #int
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack), #int]! ; capture #int
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack)], #int ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack), #int] ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack), #int]! ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack)], #int ; capture #int
"""
)
def extract_insn_offset_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
match = OFFSET_PATTERNS.match_with_be2(be2, ii.instruction_index)
if not match:
return
value = match.expression.immediate
value = mask_immediate(fhi.arch, value)
if not is_address_mapped(be2, value):
value = capa.features.extractors.binexport2.helpers.twos_complement(fhi.arch, value)
yield Offset(value), ih.address
yield OperandOffset(match.operand_index, value), ih.address
NZXOR_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
eor reg, reg, reg
eor reg, reg, #int
"""
)
def extract_insn_nzxor_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
if NZXOR_PATTERNS.match_with_be2(be2, ii.instruction_index) is None:
return
instruction: BinExport2.Instruction = be2.instruction[ii.instruction_index]
# guaranteed to be simple int/reg operands
# so we don't have to realize the tree/list.
operands: List[BinExport2.Operand] = [be2.operand[operand_index] for operand_index in instruction.operand_index]
if operands[1] != operands[2]:
yield Characteristic("nzxor"), ih.address
INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
blx|bx|blr reg
"""
)
def extract_function_indirect_call_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
if INDIRECT_CALL_PATTERNS.match_with_be2(be2, ii.instruction_index) is not None:
yield Characteristic("indirect call"), ih.address
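
One subtlety in the offset extraction above: a captured displacement is a raw bit pattern, so negative offsets such as ldr x0, [x1, #-0x8] must be reinterpreted as signed values after masking. A plain-Python sketch of that reinterpretation (the real twos_complement in capa.features.extractors.binexport2.helpers also takes the architecture to choose the width):

    def twos_complement(value: int, bits: int) -> int:
        # reinterpret an unsigned bit pattern as a signed integer.
        if value & (1 << (bits - 1)):
            return value - (1 << bits)
        return value

    # the 64-bit pattern 0xFFFFFFFFFFFFFFF8 is the displacement -8,
    # as in: ldr x0, [x1, #-0x8]
    assert twos_complement(0xFFFFFFFFFFFFFFF8, 64) == -8
    assert twos_complement(0x18, 64) == 0x18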

View File

@@ -1,135 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from typing import List, Optional
from dataclasses import dataclass
from capa.features.extractors.binexport2.helpers import get_operand_expressions
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
# security cookie checks may perform non-zeroing XORs; these are expected within a certain
# byte range of the first and returning basic blocks. checking for this helps to reduce FP features
SECURITY_COOKIE_BYTES_DELTA: int = 0x40
@dataclass
class OperandPhraseInfo:
scale: Optional[BinExport2.Expression] = None
index: Optional[BinExport2.Expression] = None
base: Optional[BinExport2.Expression] = None
displacement: Optional[BinExport2.Expression] = None
def get_operand_phrase_info(be2: BinExport2, operand: BinExport2.Operand) -> Optional[OperandPhraseInfo]:
# assume the following (see https://blog.yossarian.net/2020/06/13/How-x86_64-addresses-memory):
#
# Scale: A 2-bit constant factor
# Index: Any general purpose register
# Base: Any general purpose register
# Displacement: An integral offset
expressions: List[BinExport2.Expression] = get_operand_expressions(be2, operand)
# skip expressions up to and including BinExport2.Expression.DEREFERENCE; assume the caller
# has already checked for BinExport2.Expression.DEREFERENCE
for i, expression in enumerate(expressions):
if expression.type == BinExport2.Expression.DEREFERENCE:
expressions = expressions[i + 1 :]
break
expression0: BinExport2.Expression
expression1: BinExport2.Expression
expression2: BinExport2.Expression
expression3: BinExport2.Expression
expression4: BinExport2.Expression
if len(expressions) == 1:
expression0 = expressions[0]
assert (
expression0.type == BinExport2.Expression.IMMEDIATE_INT
or expression0.type == BinExport2.Expression.REGISTER
)
if expression0.type == BinExport2.Expression.IMMEDIATE_INT:
# Displacement
return OperandPhraseInfo(displacement=expression0)
elif expression0.type == BinExport2.Expression.REGISTER:
# Base
return OperandPhraseInfo(base=expression0)
elif len(expressions) == 3:
expression0 = expressions[0]
expression1 = expressions[1]
expression2 = expressions[2]
assert expression0.type == BinExport2.Expression.REGISTER
assert expression1.type == BinExport2.Expression.OPERATOR
assert (
expression2.type == BinExport2.Expression.IMMEDIATE_INT
or expression2.type == BinExport2.Expression.REGISTER
)
if expression2.type == BinExport2.Expression.REGISTER:
# Base + Index
return OperandPhraseInfo(base=expression0, index=expression2)
elif expression2.type == BinExport2.Expression.IMMEDIATE_INT:
# Base + Displacement
return OperandPhraseInfo(base=expression0, displacement=expression2)
elif len(expressions) == 5:
expression0 = expressions[0]
expression1 = expressions[1]
expression2 = expressions[2]
expression3 = expressions[3]
expression4 = expressions[4]
assert expression0.type == BinExport2.Expression.REGISTER
assert expression1.type == BinExport2.Expression.OPERATOR
assert (
expression2.type == BinExport2.Expression.REGISTER
or expression2.type == BinExport2.Expression.IMMEDIATE_INT
)
assert expression3.type == BinExport2.Expression.OPERATOR
assert expression4.type == BinExport2.Expression.IMMEDIATE_INT
if expression1.symbol == "+" and expression3.symbol == "+":
# Base + Index + Displacement
return OperandPhraseInfo(base=expression0, index=expression2, displacement=expression4)
elif expression1.symbol == "+" and expression3.symbol == "*":
# Base + (Index * Scale)
return OperandPhraseInfo(base=expression0, index=expression2, scale=expression3)
elif expression1.symbol == "*" and expression3.symbol == "+":
# (Index * Scale) + Displacement
return OperandPhraseInfo(index=expression0, scale=expression2, displacement=expression3)
else:
raise NotImplementedError(expression1.symbol, expression3.symbol)
elif len(expressions) == 7:
expression0 = expressions[0]
expression1 = expressions[1]
expression2 = expressions[2]
expression3 = expressions[3]
expression4 = expressions[4]
expression5 = expressions[5]
expression6 = expressions[6]
assert expression0.type == BinExport2.Expression.REGISTER
assert expression1.type == BinExport2.Expression.OPERATOR
assert expression2.type == BinExport2.Expression.REGISTER
assert expression3.type == BinExport2.Expression.OPERATOR
assert expression4.type == BinExport2.Expression.IMMEDIATE_INT
assert expression5.type == BinExport2.Expression.OPERATOR
assert expression6.type == BinExport2.Expression.IMMEDIATE_INT
# Base + (Index * Scale) + Displacement
return OperandPhraseInfo(base=expression0, index=expression2, scale=expression4, displacement=expression6)
else:
raise NotImplementedError(len(expressions))
return None
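
For orientation, the len(expressions) branches above correspond to the standard x86 phrase forms; for example, [ebx + esi*4 + 0x10] flattens (after the DEREFERENCE node) into the seven expressions handled by the final branch as Base + (Index * Scale) + Displacement. A toy re-implementation over (kind, text) tuples rather than protobuf messages, with the five-element cases omitted for brevity:

    from typing import Dict, List, Tuple

    REG, OP, IMM = "reg", "op", "imm"  # stand-ins for Expression types

    def classify(exprs: List[Tuple[str, str]]) -> Dict[str, str]:
        # mirrors the length-based branches above (1/3/7 expressions).
        if len(exprs) == 1:
            kind, text = exprs[0]
            return {"base": text} if kind == REG else {"displacement": text}
        if len(exprs) == 3:
            key = "index" if exprs[2][0] == REG else "displacement"
            return {"base": exprs[0][1], key: exprs[2][1]}
        if len(exprs) == 7:
            # Base + (Index * Scale) + Displacement
            return {
                "base": exprs[0][1],
                "index": exprs[2][1],
                "scale": exprs[4][1],
                "displacement": exprs[6][1],
            }
        raise NotImplementedError(len(exprs))

    # [ebx + esi*4 + 0x10]
    phrase = classify(
        [(REG, "ebx"), (OP, "+"), (REG, "esi"), (OP, "*"), (IMM, "4"), (OP, "+"), (IMM, "0x10")]
    )
    assert phrase == {"base": "ebx", "index": "esi", "scale": "4", "displacement": "0x10"}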

View File

@@ -1,248 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import List, Tuple, Iterator
import capa.features.extractors.strings
import capa.features.extractors.binexport2.helpers
from capa.features.insn import MAX_STRUCTURE_SIZE, Number, Offset, OperandNumber, OperandOffset
from capa.features.common import Feature, Characteristic
from capa.features.address import Address
from capa.features.extractors.binexport2 import BinExport2Index, FunctionContext, BasicBlockContext, InstructionContext
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle
from capa.features.extractors.binexport2.helpers import (
BinExport2InstructionPatternMatcher,
mask_immediate,
is_address_mapped,
get_instruction_mnemonic,
)
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
from capa.features.extractors.binexport2.arch.intel.helpers import SECURITY_COOKIE_BYTES_DELTA
logger = logging.getLogger(__name__)
IGNORE_NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
ret #int
retn #int
add reg(stack), #int
sub reg(stack), #int
"""
)
NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
push #int0 ; capture #int0
# it's a little tedious to enumerate all the address forms
# but at least we are explicit
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar reg, #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [#int], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg + #int], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg + reg + #int], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg + reg * #int], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg + reg * #int + #int], #int0 ; capture #int0
imul reg, reg, #int ; capture #int
# note that int is first
cmp|test #int0, reg ; capture #int0
# imagine reg is zero'd out, then this is like `mov reg, #int`
# which is not uncommon.
lea reg, [reg + #int] ; capture #int
"""
)
def extract_insn_number_features(
fh: FunctionHandle, _bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
if IGNORE_NUMBER_PATTERNS.match_with_be2(be2, ii.instruction_index):
return
match = NUMBER_PATTERNS.match_with_be2(be2, ii.instruction_index)
if not match:
return
value: int = mask_immediate(fhi.arch, match.expression.immediate)
if is_address_mapped(be2, value):
return
yield Number(value), ih.address
yield OperandNumber(match.operand_index, value), ih.address
instruction_index: int = ii.instruction_index
instruction: BinExport2.Instruction = be2.instruction[instruction_index]
mnemonic: str = get_instruction_mnemonic(be2, instruction)
if mnemonic.startswith("add"):
if 0 < value < MAX_STRUCTURE_SIZE:
yield Offset(value), ih.address
yield OperandOffset(match.operand_index, value), ih.address
OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
mov|movzx|movsb|cmp [reg + reg * #int + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg * #int + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg + reg + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg(not-stack) + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg + reg * #int + #int0], reg ; capture #int0
mov|movzx|movsb|cmp [reg * #int + #int0], reg ; capture #int0
mov|movzx|movsb|cmp [reg + reg + #int0], reg ; capture #int0
mov|movzx|movsb|cmp [reg(not-stack) + #int0], reg ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg + reg * #int + #int0] ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg * #int + #int0] ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg + reg + #int0] ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg(not-stack) + #int0] ; capture #int0
"""
)
# these are patterns that access offset 0 from some pointer
# (pointer is not the stack pointer).
OFFSET_ZERO_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
mov|movzx|movsb [reg(not-stack)], reg
mov|movzx|movsb [reg(not-stack)], #int
lea reg, [reg(not-stack)]
"""
)
def extract_insn_offset_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
match = OFFSET_PATTERNS.match_with_be2(be2, ii.instruction_index)
if not match:
match = OFFSET_ZERO_PATTERNS.match_with_be2(be2, ii.instruction_index)
if not match:
return
yield Offset(0), ih.address
yield OperandOffset(match.operand_index, 0), ih.address
value = mask_immediate(fhi.arch, match.expression.immediate)
if is_address_mapped(be2, value):
return
value = capa.features.extractors.binexport2.helpers.twos_complement(fhi.arch, value, 32)
yield Offset(value), ih.address
yield OperandOffset(match.operand_index, value), ih.address
def is_security_cookie(
fhi: FunctionContext,
bbi: BasicBlockContext,
instruction_address: int,
instruction: BinExport2.Instruction,
) -> bool:
"""
check if an instruction is related to security cookie checks.
"""
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
# security cookie check should use SP or BP
op1: BinExport2.Operand = be2.operand[instruction.operand_index[1]]
op1_exprs: List[BinExport2.Expression] = [be2.expression[expr_i] for expr_i in op1.expression_index]
if all(expr.symbol.lower() not in ("bp", "esp", "ebp", "rbp", "rsp") for expr in op1_exprs):
return False
# check_nzxor_security_cookie_delta
# if the insn falls at the start of the entry basic block of the parent function.
flow_graph: BinExport2.FlowGraph = be2.flow_graph[fhi.flow_graph_index]
basic_block_index: int = bbi.basic_block_index
bb: BinExport2.BasicBlock = be2.basic_block[basic_block_index]
if flow_graph.entry_basic_block_index == basic_block_index:
first_addr: int = min((idx.insn_address_by_index[ir.begin_index] for ir in bb.instruction_index))
if instruction_address < first_addr + SECURITY_COOKIE_BYTES_DELTA:
return True
# or insn falls at the end before return in a terminal basic block.
if basic_block_index not in (e.source_basic_block_index for e in flow_graph.edge):
last_addr: int = max((idx.insn_address_by_index[ir.end_index - 1] for ir in bb.instruction_index))
if instruction_address > last_addr - SECURITY_COOKIE_BYTES_DELTA:
return True
return False
NZXOR_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
xor|xorpd|xorps|pxor reg, reg
xor|xorpd|xorps|pxor reg, #int
"""
)
def extract_insn_nzxor_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
"""
parse non-zeroing XOR instruction from the given instruction.
ignore expected non-zeroing XORs, e.g. security cookies.
"""
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
if NZXOR_PATTERNS.match_with_be2(be2, ii.instruction_index) is None:
return
instruction: BinExport2.Instruction = be2.instruction[ii.instruction_index]
# guaranteed to be simple int/reg operands
# so we don't have to realize the tree/list.
operands: List[BinExport2.Operand] = [be2.operand[operand_index] for operand_index in instruction.operand_index]
if operands[0] == operands[1]:
return
instruction_address: int = idx.insn_address_by_index[ii.instruction_index]
if is_security_cookie(fhi, bbh.inner, instruction_address, instruction):
return
yield Characteristic("nzxor"), ih.address
INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
call|jmp reg0
call|jmp [reg + reg * #int + #int]
call|jmp [reg + reg * #int]
call|jmp [reg * #int + #int]
call|jmp [reg + reg + #int]
call|jmp [reg + #int]
call|jmp [reg]
"""
)
def extract_function_indirect_call_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
match = INDIRECT_CALL_PATTERNS.match_with_be2(be2, ii.instruction_index)
if match is None:
return
yield Characteristic("indirect call"), ih.address
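
The is_security_cookie check above encodes a locality heuristic: compiler-emitted security cookie XORs land within SECURITY_COOKIE_BYTES_DELTA (0x40) bytes of either the function entry or a terminal block's end. A stripped-down sketch of just the range test with made-up addresses (the real function additionally checks which basic block the instruction belongs to):

    SECURITY_COOKIE_BYTES_DELTA = 0x40

    def near_cookie_check(insn_addr: int, entry_addr: int, last_addr: int) -> bool:
        # near the prologue, where the cookie is installed...
        if insn_addr < entry_addr + SECURITY_COOKIE_BYTES_DELTA:
            return True
        # ...or near the epilogue, where it is verified before returning.
        if insn_addr > last_addr - SECURITY_COOKIE_BYTES_DELTA:
            return True
        return False

    assert near_cookie_check(0x401008, entry_addr=0x401000, last_addr=0x4011F0)
    assert not near_cookie_check(0x401100, entry_addr=0x401000, last_addr=0x4011F0)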

View File

@@ -1,40 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from typing import List, Tuple, Iterator
from capa.features.common import Feature, Characteristic
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.basicblock import BasicBlock
from capa.features.extractors.binexport2 import FunctionContext, BasicBlockContext
from capa.features.extractors.base_extractor import BBHandle, FunctionHandle
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
def extract_bb_tight_loop(fh: FunctionHandle, bbh: BBHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
bbi: BasicBlockContext = bbh.inner
idx = fhi.ctx.idx
basic_block_index: int = bbi.basic_block_index
target_edges: List[BinExport2.FlowGraph.Edge] = idx.target_edges_by_basic_block_index[basic_block_index]
if basic_block_index in (e.source_basic_block_index for e in target_edges):
basic_block_address: int = idx.get_basic_block_address(basic_block_index)
yield Characteristic("tight loop"), AbsoluteVirtualAddress(basic_block_address)
def extract_features(fh: FunctionHandle, bbh: BBHandle) -> Iterator[Tuple[Feature, Address]]:
"""extract basic block features"""
for bb_handler in BASIC_BLOCK_HANDLERS:
for feature, addr in bb_handler(fh, bbh):
yield feature, addr
yield BasicBlock(), bbh.address
BASIC_BLOCK_HANDLERS = (extract_bb_tight_loop,)
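
The tight-loop characteristic above reduces to a single graph property: a basic block that appears among the sources of its own incoming edges, i.e. a self-loop. A minimal sketch over an adjacency mapping with hypothetical block indices:

    from typing import Dict, List

    def has_tight_loop(block: int, sources_by_target: Dict[int, List[int]]) -> bool:
        # sources_by_target maps a block to the source blocks of its incoming edges;
        # a block among its own sources jumps back to itself: a tight loop.
        return block in sources_by_target.get(block, [])

    # block 2 branches back to itself (e.g. a byte-copy loop); block 1 does not.
    incoming = {1: [0], 2: [1, 2]}
    assert has_tight_loop(2, incoming)
    assert not has_tight_loop(1, incoming)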

File diff suppressed because one or more lines are too long

View File

@@ -1,784 +0,0 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
The representation is generic to accommodate various source architectures.
In particular 32 and 64 bit versions of x86, ARM, PowerPC and MIPS have been
tested.
Multiple levels of deduping have been applied to make the format more compact
and avoid redundant data duplication. Some of this is due to hard-earned
experience trying to cope with intentionally obfuscated malicious binaries.
Note in particular that the same instruction may occur in multiple basic
blocks and the same basic block in multiple functions (instruction and basic
block sharing). If handled naively, this lets malware cause a combinatorial
explosion in memory usage, DoSing the analyst. This format
should store every unique expression, mnemonic, operand, instruction and
basic block only once instead of duplicating the information for every
instance of it.
This format does _not_ try to be 100% backwards compatible with the old
version. In particular, we do not store IDA's comment types, making lossless
porting of IDA comments impossible. We do, however, store comments and
expression substitutions, so porting the actual data is possible, just not
the exact IDA type.
While it would be more natural to use addresses when defining call graph and
flow graph edges and other such references, it is more efficient to employ
one more level of indirection and use indices into the basic block or
function arrays instead. This is because addresses will usually use most of
the available 64 bit space while indices will be much smaller and compress
much better (less randomly distributed).
We omit all fields that are set to their default value anyways. Note that
this has two side effects:
- changing the defaults in this proto file will, in effect, change what's
read from disk
- the generated code has_* methods are somewhat less useful
WARNING: We omit the defaults manually in the code writing the data. Do not
change the defaults here without changing the code!
TODO(cblichmann): Link flow graphs to call graph nodes. The connection is
there via the address, but tricky to extract.
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import sys
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class BinExport2(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class Meta(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
EXECUTABLE_NAME_FIELD_NUMBER: builtins.int
EXECUTABLE_ID_FIELD_NUMBER: builtins.int
ARCHITECTURE_NAME_FIELD_NUMBER: builtins.int
TIMESTAMP_FIELD_NUMBER: builtins.int
executable_name: builtins.str
"""Input binary filename including file extension but excluding file path.
example: "insider_gcc.exe"
"""
executable_id: builtins.str
"""Application defined executable id. Often the SHA256 hash of the input
binary.
"""
architecture_name: builtins.str
"""Input architecture name, e.g. x86-32."""
timestamp: builtins.int
"""When did this file get created? Unix time. This may be used for some
primitive versioning in case the file format ever changes.
"""
def __init__(
self,
*,
executable_name: builtins.str | None = ...,
executable_id: builtins.str | None = ...,
architecture_name: builtins.str | None = ...,
timestamp: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["architecture_name", b"architecture_name", "executable_id", b"executable_id", "executable_name", b"executable_name", "timestamp", b"timestamp"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["architecture_name", b"architecture_name", "executable_id", b"executable_id", "executable_name", b"executable_name", "timestamp", b"timestamp"]) -> None: ...
@typing_extensions.final
class CallGraph(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class Vertex(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
class _Type:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BinExport2.CallGraph.Vertex._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
NORMAL: BinExport2.CallGraph.Vertex._Type.ValueType # 0
"""Regular function with full disassembly."""
LIBRARY: BinExport2.CallGraph.Vertex._Type.ValueType # 1
"""This function is a well known library function."""
IMPORTED: BinExport2.CallGraph.Vertex._Type.ValueType # 2
"""Imported from a dynamic link library (e.g. dll)."""
THUNK: BinExport2.CallGraph.Vertex._Type.ValueType # 3
"""A thunk function, forwarding its work via an unconditional jump."""
INVALID: BinExport2.CallGraph.Vertex._Type.ValueType # 4
"""An invalid function (a function that contained invalid code or was
considered invalid by some heuristics).
"""
class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
NORMAL: BinExport2.CallGraph.Vertex.Type.ValueType # 0
"""Regular function with full disassembly."""
LIBRARY: BinExport2.CallGraph.Vertex.Type.ValueType # 1
"""This function is a well known library function."""
IMPORTED: BinExport2.CallGraph.Vertex.Type.ValueType # 2
"""Imported from a dynamic link library (e.g. dll)."""
THUNK: BinExport2.CallGraph.Vertex.Type.ValueType # 3
"""A thunk function, forwarding its work via an unconditional jump."""
INVALID: BinExport2.CallGraph.Vertex.Type.ValueType # 4
"""An invalid function (a function that contained invalid code or was
considered invalid by some heuristics).
"""
ADDRESS_FIELD_NUMBER: builtins.int
TYPE_FIELD_NUMBER: builtins.int
MANGLED_NAME_FIELD_NUMBER: builtins.int
DEMANGLED_NAME_FIELD_NUMBER: builtins.int
LIBRARY_INDEX_FIELD_NUMBER: builtins.int
MODULE_INDEX_FIELD_NUMBER: builtins.int
address: builtins.int
"""The function's entry point address. Messages need to be sorted, see
comment below on `vertex`.
"""
type: global___BinExport2.CallGraph.Vertex.Type.ValueType
mangled_name: builtins.str
"""If the function has a user defined, real name it will be given here.
main() is a proper name, sub_BAADF00D is not (auto generated dummy
name).
"""
demangled_name: builtins.str
"""Demangled name if the function is a mangled C++ function and we could
demangle it.
"""
library_index: builtins.int
"""If this is a library function, what is its index in library arrays."""
module_index: builtins.int
"""If module name, such as class name for DEX files, is present - index in
module table.
"""
def __init__(
self,
*,
address: builtins.int | None = ...,
type: global___BinExport2.CallGraph.Vertex.Type.ValueType | None = ...,
mangled_name: builtins.str | None = ...,
demangled_name: builtins.str | None = ...,
library_index: builtins.int | None = ...,
module_index: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["address", b"address", "demangled_name", b"demangled_name", "library_index", b"library_index", "mangled_name", b"mangled_name", "module_index", b"module_index", "type", b"type"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address", b"address", "demangled_name", b"demangled_name", "library_index", b"library_index", "mangled_name", b"mangled_name", "module_index", b"module_index", "type", b"type"]) -> None: ...
@typing_extensions.final
class Edge(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
SOURCE_VERTEX_INDEX_FIELD_NUMBER: builtins.int
TARGET_VERTEX_INDEX_FIELD_NUMBER: builtins.int
source_vertex_index: builtins.int
"""source and target index into the vertex repeated field."""
target_vertex_index: builtins.int
def __init__(
self,
*,
source_vertex_index: builtins.int | None = ...,
target_vertex_index: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["source_vertex_index", b"source_vertex_index", "target_vertex_index", b"target_vertex_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["source_vertex_index", b"source_vertex_index", "target_vertex_index", b"target_vertex_index"]) -> None: ...
VERTEX_FIELD_NUMBER: builtins.int
EDGE_FIELD_NUMBER: builtins.int
@property
def vertex(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.CallGraph.Vertex]:
"""vertices == functions in the call graph.
Important: Most downstream tooling (notably BinDiff), need these to be
sorted by `Vertex::address` (ascending). For C++, the
`BinExport2Writer` class enforces this invariant.
"""
@property
def edge(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.CallGraph.Edge]:
"""edges == calls in the call graph."""
def __init__(
self,
*,
vertex: collections.abc.Iterable[global___BinExport2.CallGraph.Vertex] | None = ...,
edge: collections.abc.Iterable[global___BinExport2.CallGraph.Edge] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["edge", b"edge", "vertex", b"vertex"]) -> None: ...
@typing_extensions.final
class Expression(google.protobuf.message.Message):
"""An operand consists of 1 or more expressions, linked together as a tree."""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
class _Type:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BinExport2.Expression._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
SYMBOL: BinExport2.Expression._Type.ValueType # 1
IMMEDIATE_INT: BinExport2.Expression._Type.ValueType # 2
IMMEDIATE_FLOAT: BinExport2.Expression._Type.ValueType # 3
OPERATOR: BinExport2.Expression._Type.ValueType # 4
REGISTER: BinExport2.Expression._Type.ValueType # 5
SIZE_PREFIX: BinExport2.Expression._Type.ValueType # 6
DEREFERENCE: BinExport2.Expression._Type.ValueType # 7
class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
SYMBOL: BinExport2.Expression.Type.ValueType # 1
IMMEDIATE_INT: BinExport2.Expression.Type.ValueType # 2
IMMEDIATE_FLOAT: BinExport2.Expression.Type.ValueType # 3
OPERATOR: BinExport2.Expression.Type.ValueType # 4
REGISTER: BinExport2.Expression.Type.ValueType # 5
SIZE_PREFIX: BinExport2.Expression.Type.ValueType # 6
DEREFERENCE: BinExport2.Expression.Type.ValueType # 7
TYPE_FIELD_NUMBER: builtins.int
SYMBOL_FIELD_NUMBER: builtins.int
IMMEDIATE_FIELD_NUMBER: builtins.int
PARENT_INDEX_FIELD_NUMBER: builtins.int
IS_RELOCATION_FIELD_NUMBER: builtins.int
type: global___BinExport2.Expression.Type.ValueType
"""IMMEDIATE_INT is by far the most common type and thus we can save some
space by omitting it as the default.
"""
symbol: builtins.str
"""Symbol for this expression. Interpretation depends on type. Examples
include: "eax", "[", "+"
"""
immediate: builtins.int
"""If the expression can be interpreted as an integer value (IMMEDIATE_INT)
the value is given here.
"""
parent_index: builtins.int
"""The parent expression. Example expression tree for the second operand of:
mov eax, b4 [ebx + 12]
"b4" --- "[" --- "+" --- "ebx"
\\ "12"
"""
is_relocation: builtins.bool
"""true if the expression has entry in relocation table"""
def __init__(
self,
*,
type: global___BinExport2.Expression.Type.ValueType | None = ...,
symbol: builtins.str | None = ...,
immediate: builtins.int | None = ...,
parent_index: builtins.int | None = ...,
is_relocation: builtins.bool | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["immediate", b"immediate", "is_relocation", b"is_relocation", "parent_index", b"parent_index", "symbol", b"symbol", "type", b"type"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["immediate", b"immediate", "is_relocation", b"is_relocation", "parent_index", b"parent_index", "symbol", b"symbol", "type", b"type"]) -> None: ...
@typing_extensions.final
class Operand(google.protobuf.message.Message):
"""An instruction may have 0 or more operands."""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
EXPRESSION_INDEX_FIELD_NUMBER: builtins.int
@property
def expression_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Contains all expressions constituting this operand. All expressions
should be linked into a single tree, i.e. there should only be one
expression in this list with parent_index == NULL and all others should
descend from that. Rendering order for expressions on the same tree level
(siblings) is implicitly given by the order they are referenced in this
repeated field.
Implicit: expression sequence
"""
def __init__(
self,
*,
expression_index: collections.abc.Iterable[builtins.int] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["expression_index", b"expression_index"]) -> None: ...
@typing_extensions.final
class Mnemonic(google.protobuf.message.Message):
"""An instruction has exactly 1 mnemonic."""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
NAME_FIELD_NUMBER: builtins.int
name: builtins.str
"""Literal representation of the mnemonic, e.g.: "mov"."""
def __init__(
self,
*,
name: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["name", b"name"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["name", b"name"]) -> None: ...
@typing_extensions.final
class Instruction(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
ADDRESS_FIELD_NUMBER: builtins.int
CALL_TARGET_FIELD_NUMBER: builtins.int
MNEMONIC_INDEX_FIELD_NUMBER: builtins.int
OPERAND_INDEX_FIELD_NUMBER: builtins.int
RAW_BYTES_FIELD_NUMBER: builtins.int
COMMENT_INDEX_FIELD_NUMBER: builtins.int
address: builtins.int
"""This will only be filled for instructions that do not just flow from the
immediately preceding instruction. Regular instructions will have to
calculate their own address by adding raw_bytes.size() to the previous
instruction's address.
"""
@property
def call_target(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""If this is a call instruction and call targets could be determined
they'll be given here. Note that we may or may not have a flow graph for
the target and thus cannot use an index into the flow graph table here.
We could potentially use call graph nodes, but linking instructions to
the call graph directly does not seem a good choice.
"""
mnemonic_index: builtins.int
"""Index into the mnemonic array of strings. Used for de-duping the data.
The default value is used for the most common mnemonic in the executable.
"""
@property
def operand_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Indices into the operand tree. On X86 this can be 0, 1 or 2 elements
long, 3 elements with VEX/EVEX.
Implicit: operand sequence
"""
raw_bytes: builtins.bytes
"""The unmodified input bytes corresponding to this instruction."""
@property
def comment_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Implicit: comment sequence"""
def __init__(
self,
*,
address: builtins.int | None = ...,
call_target: collections.abc.Iterable[builtins.int] | None = ...,
mnemonic_index: builtins.int | None = ...,
operand_index: collections.abc.Iterable[builtins.int] | None = ...,
raw_bytes: builtins.bytes | None = ...,
comment_index: collections.abc.Iterable[builtins.int] | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["address", b"address", "mnemonic_index", b"mnemonic_index", "raw_bytes", b"raw_bytes"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address", b"address", "call_target", b"call_target", "comment_index", b"comment_index", "mnemonic_index", b"mnemonic_index", "operand_index", b"operand_index", "raw_bytes", b"raw_bytes"]) -> None: ...
@typing_extensions.final
class BasicBlock(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class IndexRange(google.protobuf.message.Message):
"""This is a space optimization. The instructions for an individual basic
block will usually be in a continuous index range. Thus it is more
efficient to store the range instead of individual indices. However, this
does not hold true for all basic blocks, so we need to be able to store
multiple index ranges per block.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
BEGIN_INDEX_FIELD_NUMBER: builtins.int
END_INDEX_FIELD_NUMBER: builtins.int
begin_index: builtins.int
"""These work like begin and end iterators, i.e. the sequence is
[begin_index, end_index). If the sequence only contains a single
element end_index will be omitted.
"""
end_index: builtins.int
def __init__(
self,
*,
begin_index: builtins.int | None = ...,
end_index: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["begin_index", b"begin_index", "end_index", b"end_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["begin_index", b"begin_index", "end_index", b"end_index"]) -> None: ...
INSTRUCTION_INDEX_FIELD_NUMBER: builtins.int
@property
def instruction_index(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.BasicBlock.IndexRange]:
"""Implicit: instruction sequence"""
def __init__(
self,
*,
instruction_index: collections.abc.Iterable[global___BinExport2.BasicBlock.IndexRange] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index"]) -> None: ...
@typing_extensions.final
class FlowGraph(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class Edge(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
class _Type:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BinExport2.FlowGraph.Edge._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
CONDITION_TRUE: BinExport2.FlowGraph.Edge._Type.ValueType # 1
CONDITION_FALSE: BinExport2.FlowGraph.Edge._Type.ValueType # 2
UNCONDITIONAL: BinExport2.FlowGraph.Edge._Type.ValueType # 3
SWITCH: BinExport2.FlowGraph.Edge._Type.ValueType # 4
class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
CONDITION_TRUE: BinExport2.FlowGraph.Edge.Type.ValueType # 1
CONDITION_FALSE: BinExport2.FlowGraph.Edge.Type.ValueType # 2
UNCONDITIONAL: BinExport2.FlowGraph.Edge.Type.ValueType # 3
SWITCH: BinExport2.FlowGraph.Edge.Type.ValueType # 4
SOURCE_BASIC_BLOCK_INDEX_FIELD_NUMBER: builtins.int
TARGET_BASIC_BLOCK_INDEX_FIELD_NUMBER: builtins.int
TYPE_FIELD_NUMBER: builtins.int
IS_BACK_EDGE_FIELD_NUMBER: builtins.int
source_basic_block_index: builtins.int
"""Source instruction will always be the last instruction of the source
basic block, target instruction the first instruction of the target
basic block.
"""
target_basic_block_index: builtins.int
type: global___BinExport2.FlowGraph.Edge.Type.ValueType
is_back_edge: builtins.bool
"""Indicates whether this is a loop edge as determined by Lengauer-Tarjan."""
def __init__(
self,
*,
source_basic_block_index: builtins.int | None = ...,
target_basic_block_index: builtins.int | None = ...,
type: global___BinExport2.FlowGraph.Edge.Type.ValueType | None = ...,
is_back_edge: builtins.bool | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["is_back_edge", b"is_back_edge", "source_basic_block_index", b"source_basic_block_index", "target_basic_block_index", b"target_basic_block_index", "type", b"type"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["is_back_edge", b"is_back_edge", "source_basic_block_index", b"source_basic_block_index", "target_basic_block_index", b"target_basic_block_index", "type", b"type"]) -> None: ...
BASIC_BLOCK_INDEX_FIELD_NUMBER: builtins.int
ENTRY_BASIC_BLOCK_INDEX_FIELD_NUMBER: builtins.int
EDGE_FIELD_NUMBER: builtins.int
@property
def basic_block_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Basic blocks are sorted by address."""
entry_basic_block_index: builtins.int
"""The flow graph's entry point address is the first instruction of the
entry_basic_block.
"""
@property
def edge(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.FlowGraph.Edge]: ...
def __init__(
self,
*,
basic_block_index: collections.abc.Iterable[builtins.int] | None = ...,
entry_basic_block_index: builtins.int | None = ...,
edge: collections.abc.Iterable[global___BinExport2.FlowGraph.Edge] | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["entry_basic_block_index", b"entry_basic_block_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["basic_block_index", b"basic_block_index", "edge", b"edge", "entry_basic_block_index", b"entry_basic_block_index"]) -> None: ...
@typing_extensions.final
class Reference(google.protobuf.message.Message):
"""Generic reference class used for address comments (deprecated), string
references and expression substitutions. It allows referencing from an
instruction, operand, expression subtree tuple to a de-duped string in the
string table.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
INSTRUCTION_INDEX_FIELD_NUMBER: builtins.int
INSTRUCTION_OPERAND_INDEX_FIELD_NUMBER: builtins.int
OPERAND_EXPRESSION_INDEX_FIELD_NUMBER: builtins.int
STRING_TABLE_INDEX_FIELD_NUMBER: builtins.int
instruction_index: builtins.int
"""Index into the global instruction table."""
instruction_operand_index: builtins.int
"""Index into the operand array local to an instruction."""
operand_expression_index: builtins.int
"""Index into the expression array local to an operand."""
string_table_index: builtins.int
"""Index into the global string table."""
def __init__(
self,
*,
instruction_index: builtins.int | None = ...,
instruction_operand_index: builtins.int | None = ...,
operand_expression_index: builtins.int | None = ...,
string_table_index: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index", "instruction_operand_index", b"instruction_operand_index", "operand_expression_index", b"operand_expression_index", "string_table_index", b"string_table_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index", "instruction_operand_index", b"instruction_operand_index", "operand_expression_index", b"operand_expression_index", "string_table_index", b"string_table_index"]) -> None: ...
@typing_extensions.final
class DataReference(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
INSTRUCTION_INDEX_FIELD_NUMBER: builtins.int
ADDRESS_FIELD_NUMBER: builtins.int
instruction_index: builtins.int
"""Index into the global instruction table."""
address: builtins.int
"""Address being referred."""
def __init__(
self,
*,
instruction_index: builtins.int | None = ...,
address: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["address", b"address", "instruction_index", b"instruction_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address", b"address", "instruction_index", b"instruction_index"]) -> None: ...
@typing_extensions.final
class Comment(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
class _Type:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BinExport2.Comment._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT: BinExport2.Comment._Type.ValueType # 0
"""A regular instruction comment. Typically displayed next to the
instruction disassembly.
"""
ANTERIOR: BinExport2.Comment._Type.ValueType # 1
"""A comment line that is typically displayed before (above) the
instruction it refers to.
"""
POSTERIOR: BinExport2.Comment._Type.ValueType # 2
"""Like ANTERIOR, but a typically displayed after (below)."""
FUNCTION: BinExport2.Comment._Type.ValueType # 3
"""Similar to an ANTERIOR comment, but applies to the beginning of an
identified function. Programs displaying the proto may choose to render
these differently (e.g. above an inferred function signature).
"""
ENUM: BinExport2.Comment._Type.ValueType # 4
"""Named constants, bitfields and similar."""
LOCATION: BinExport2.Comment._Type.ValueType # 5
"""Named locations, usually the target of a jump."""
GLOBAL_REFERENCE: BinExport2.Comment._Type.ValueType # 6
"""Data cross references."""
LOCAL_REFERENCE: BinExport2.Comment._Type.ValueType # 7
"""Local/stack variables."""
class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
DEFAULT: BinExport2.Comment.Type.ValueType # 0
"""A regular instruction comment. Typically displayed next to the
instruction disassembly.
"""
ANTERIOR: BinExport2.Comment.Type.ValueType # 1
"""A comment line that is typically displayed before (above) the
instruction it refers to.
"""
POSTERIOR: BinExport2.Comment.Type.ValueType # 2
"""Like ANTERIOR, but a typically displayed after (below)."""
FUNCTION: BinExport2.Comment.Type.ValueType # 3
"""Similar to an ANTERIOR comment, but applies to the beginning of an
identified function. Programs displaying the proto may choose to render
these differently (e.g. above an inferred function signature).
"""
ENUM: BinExport2.Comment.Type.ValueType # 4
"""Named constants, bitfields and similar."""
LOCATION: BinExport2.Comment.Type.ValueType # 5
"""Named locations, usually the target of a jump."""
GLOBAL_REFERENCE: BinExport2.Comment.Type.ValueType # 6
"""Data cross references."""
LOCAL_REFERENCE: BinExport2.Comment.Type.ValueType # 7
"""Local/stack variables."""
INSTRUCTION_INDEX_FIELD_NUMBER: builtins.int
INSTRUCTION_OPERAND_INDEX_FIELD_NUMBER: builtins.int
OPERAND_EXPRESSION_INDEX_FIELD_NUMBER: builtins.int
STRING_TABLE_INDEX_FIELD_NUMBER: builtins.int
REPEATABLE_FIELD_NUMBER: builtins.int
TYPE_FIELD_NUMBER: builtins.int
instruction_index: builtins.int
"""Index into the global instruction table. This is here to enable
comment processing without having to iterate over all instructions.
There is an N:M mapping of instructions to comments.
"""
instruction_operand_index: builtins.int
"""Index into the operand array local to an instruction."""
operand_expression_index: builtins.int
"""Index into the expression array local to an operand, like in Reference.
This is not currently used, but makes it possible to implement expression
substitutions.
"""
string_table_index: builtins.int
"""Index into the global string table."""
repeatable: builtins.bool
"""Comment is propagated to all locations that reference the original
location.
"""
type: global___BinExport2.Comment.Type.ValueType
def __init__(
self,
*,
instruction_index: builtins.int | None = ...,
instruction_operand_index: builtins.int | None = ...,
operand_expression_index: builtins.int | None = ...,
string_table_index: builtins.int | None = ...,
repeatable: builtins.bool | None = ...,
type: global___BinExport2.Comment.Type.ValueType | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index", "instruction_operand_index", b"instruction_operand_index", "operand_expression_index", b"operand_expression_index", "repeatable", b"repeatable", "string_table_index", b"string_table_index", "type", b"type"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index", "instruction_operand_index", b"instruction_operand_index", "operand_expression_index", b"operand_expression_index", "repeatable", b"repeatable", "string_table_index", b"string_table_index", "type", b"type"]) -> None: ...
@typing_extensions.final
class Section(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
ADDRESS_FIELD_NUMBER: builtins.int
SIZE_FIELD_NUMBER: builtins.int
FLAG_R_FIELD_NUMBER: builtins.int
FLAG_W_FIELD_NUMBER: builtins.int
FLAG_X_FIELD_NUMBER: builtins.int
address: builtins.int
"""Section start address."""
size: builtins.int
"""Section size."""
flag_r: builtins.bool
"""Read flag of the section, True when section is readable."""
flag_w: builtins.bool
"""Write flag of the section, True when section is writable."""
flag_x: builtins.bool
"""Execute flag of the section, True when section is executable."""
def __init__(
self,
*,
address: builtins.int | None = ...,
size: builtins.int | None = ...,
flag_r: builtins.bool | None = ...,
flag_w: builtins.bool | None = ...,
flag_x: builtins.bool | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["address", b"address", "flag_r", b"flag_r", "flag_w", b"flag_w", "flag_x", b"flag_x", "size", b"size"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address", b"address", "flag_r", b"flag_r", "flag_w", b"flag_w", "flag_x", b"flag_x", "size", b"size"]) -> None: ...
@typing_extensions.final
class Library(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
IS_STATIC_FIELD_NUMBER: builtins.int
LOAD_ADDRESS_FIELD_NUMBER: builtins.int
NAME_FIELD_NUMBER: builtins.int
is_static: builtins.bool
"""If this library is statically linked."""
load_address: builtins.int
"""Address where this library was loaded, 0 if unknown."""
name: builtins.str
"""Name of the library (format is platform-dependent)."""
def __init__(
self,
*,
is_static: builtins.bool | None = ...,
load_address: builtins.int | None = ...,
name: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["is_static", b"is_static", "load_address", b"load_address", "name", b"name"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["is_static", b"is_static", "load_address", b"load_address", "name", b"name"]) -> None: ...
@typing_extensions.final
class Module(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
NAME_FIELD_NUMBER: builtins.int
name: builtins.str
"""Name, such as Java class name. Platform-dependent."""
def __init__(
self,
*,
name: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["name", b"name"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["name", b"name"]) -> None: ...
META_INFORMATION_FIELD_NUMBER: builtins.int
EXPRESSION_FIELD_NUMBER: builtins.int
OPERAND_FIELD_NUMBER: builtins.int
MNEMONIC_FIELD_NUMBER: builtins.int
INSTRUCTION_FIELD_NUMBER: builtins.int
BASIC_BLOCK_FIELD_NUMBER: builtins.int
FLOW_GRAPH_FIELD_NUMBER: builtins.int
CALL_GRAPH_FIELD_NUMBER: builtins.int
STRING_TABLE_FIELD_NUMBER: builtins.int
ADDRESS_COMMENT_FIELD_NUMBER: builtins.int
COMMENT_FIELD_NUMBER: builtins.int
STRING_REFERENCE_FIELD_NUMBER: builtins.int
EXPRESSION_SUBSTITUTION_FIELD_NUMBER: builtins.int
SECTION_FIELD_NUMBER: builtins.int
LIBRARY_FIELD_NUMBER: builtins.int
DATA_REFERENCE_FIELD_NUMBER: builtins.int
MODULE_FIELD_NUMBER: builtins.int
@property
def meta_information(self) -> global___BinExport2.Meta: ...
@property
def expression(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Expression]: ...
@property
def operand(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Operand]: ...
@property
def mnemonic(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Mnemonic]: ...
@property
def instruction(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Instruction]: ...
@property
def basic_block(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.BasicBlock]: ...
@property
def flow_graph(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.FlowGraph]: ...
@property
def call_graph(self) -> global___BinExport2.CallGraph: ...
@property
def string_table(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: ...
@property
def address_comment(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Reference]:
"""No longer written. This is here so that BinDiff can work with older
BinExport files.
"""
@property
def comment(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Comment]:
"""Rich comment index used for BinDiff's comment porting."""
@property
def string_reference(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Reference]: ...
@property
def expression_substitution(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Reference]: ...
@property
def section(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Section]: ...
@property
def library(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Library]: ...
@property
def data_reference(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.DataReference]: ...
@property
def module(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Module]: ...
def __init__(
self,
*,
meta_information: global___BinExport2.Meta | None = ...,
expression: collections.abc.Iterable[global___BinExport2.Expression] | None = ...,
operand: collections.abc.Iterable[global___BinExport2.Operand] | None = ...,
mnemonic: collections.abc.Iterable[global___BinExport2.Mnemonic] | None = ...,
instruction: collections.abc.Iterable[global___BinExport2.Instruction] | None = ...,
basic_block: collections.abc.Iterable[global___BinExport2.BasicBlock] | None = ...,
flow_graph: collections.abc.Iterable[global___BinExport2.FlowGraph] | None = ...,
call_graph: global___BinExport2.CallGraph | None = ...,
string_table: collections.abc.Iterable[builtins.str] | None = ...,
address_comment: collections.abc.Iterable[global___BinExport2.Reference] | None = ...,
comment: collections.abc.Iterable[global___BinExport2.Comment] | None = ...,
string_reference: collections.abc.Iterable[global___BinExport2.Reference] | None = ...,
expression_substitution: collections.abc.Iterable[global___BinExport2.Reference] | None = ...,
section: collections.abc.Iterable[global___BinExport2.Section] | None = ...,
library: collections.abc.Iterable[global___BinExport2.Library] | None = ...,
data_reference: collections.abc.Iterable[global___BinExport2.DataReference] | None = ...,
module: collections.abc.Iterable[global___BinExport2.Module] | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["call_graph", b"call_graph", "meta_information", b"meta_information"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address_comment", b"address_comment", "basic_block", b"basic_block", "call_graph", b"call_graph", "comment", b"comment", "data_reference", b"data_reference", "expression", b"expression", "expression_substitution", b"expression_substitution", "flow_graph", b"flow_graph", "instruction", b"instruction", "library", b"library", "meta_information", b"meta_information", "mnemonic", b"mnemonic", "module", b"module", "operand", b"operand", "section", b"section", "string_reference", b"string_reference", "string_table", b"string_table"]) -> None: ...
global___BinExport2 = BinExport2
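# Example (sketch): loading a .BinExport file with the standard protobuf API;
# the path is illustrative.
#
#     from pathlib import Path
#
#     be2 = BinExport2()
#     be2.ParseFromString(Path("sample.BinExport").read_bytes())
#     print(be2.meta_information.executable_name)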

View File

@@ -1,130 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Set, List, Tuple, Iterator
import capa.features.extractors.elf
import capa.features.extractors.common
import capa.features.extractors.binexport2.file
import capa.features.extractors.binexport2.insn
import capa.features.extractors.binexport2.helpers
import capa.features.extractors.binexport2.function
import capa.features.extractors.binexport2.basicblock
from capa.features.common import OS, Arch, Format, Feature
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.extractors.binexport2 import (
AddressSpace,
AnalysisContext,
BinExport2Index,
FunctionContext,
BasicBlockContext,
BinExport2Analysis,
InstructionContext,
)
from capa.features.extractors.base_extractor import (
BBHandle,
InsnHandle,
SampleHashes,
FunctionHandle,
StaticFeatureExtractor,
)
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger(__name__)
class BinExport2FeatureExtractor(StaticFeatureExtractor):
def __init__(self, be2: BinExport2, buf: bytes):
super().__init__(hashes=SampleHashes.from_bytes(buf))
self.be2: BinExport2 = be2
self.buf: bytes = buf
self.idx: BinExport2Index = BinExport2Index(self.be2)
self.analysis: BinExport2Analysis = BinExport2Analysis(self.be2, self.idx, self.buf)
address_space: AddressSpace = AddressSpace.from_buf(buf, self.analysis.base_address)
self.ctx: AnalysisContext = AnalysisContext(self.buf, self.be2, self.idx, self.analysis, address_space)
self.global_features: List[Tuple[Feature, Address]] = []
self.global_features.extend(list(capa.features.extractors.common.extract_format(self.buf)))
self.global_features.extend(list(capa.features.extractors.common.extract_os(self.buf)))
self.global_features.extend(list(capa.features.extractors.common.extract_arch(self.buf)))
self.format: Set[str] = set()
self.os: Set[str] = set()
self.arch: Set[str] = set()
for feature, _ in self.global_features:
assert isinstance(feature.value, str)
if isinstance(feature, Format):
self.format.add(feature.value)
elif isinstance(feature, OS):
self.os.add(feature.value)
elif isinstance(feature, Arch):
self.arch.add(feature.value)
else:
raise ValueError("unexpected global feature: %s", feature)
def get_base_address(self) -> AbsoluteVirtualAddress:
return AbsoluteVirtualAddress(self.analysis.base_address)
def extract_global_features(self) -> Iterator[Tuple[Feature, Address]]:
yield from self.global_features
def extract_file_features(self) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.binexport2.file.extract_features(self.be2, self.buf)
def get_functions(self) -> Iterator[FunctionHandle]:
for flow_graph_index, flow_graph in enumerate(self.be2.flow_graph):
entry_basic_block_index: int = flow_graph.entry_basic_block_index
flow_graph_address: int = self.idx.get_basic_block_address(entry_basic_block_index)
vertex_idx: int = self.idx.vertex_index_by_address[flow_graph_address]
be2_vertex: BinExport2.CallGraph.Vertex = self.be2.call_graph.vertex[vertex_idx]
# skip thunks
if capa.features.extractors.binexport2.helpers.is_vertex_type(
be2_vertex, BinExport2.CallGraph.Vertex.Type.THUNK
):
continue
yield FunctionHandle(
AbsoluteVirtualAddress(flow_graph_address),
inner=FunctionContext(self.ctx, flow_graph_index, self.format, self.os, self.arch),
)
def extract_function_features(self, fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.binexport2.function.extract_features(fh)
def get_basic_blocks(self, fh: FunctionHandle) -> Iterator[BBHandle]:
fhi: FunctionContext = fh.inner
flow_graph_index: int = fhi.flow_graph_index
flow_graph: BinExport2.FlowGraph = self.be2.flow_graph[flow_graph_index]
for basic_block_index in flow_graph.basic_block_index:
basic_block_address: int = self.idx.get_basic_block_address(basic_block_index)
yield BBHandle(
address=AbsoluteVirtualAddress(basic_block_address),
inner=BasicBlockContext(basic_block_index),
)
def extract_basic_block_features(self, fh: FunctionHandle, bbh: BBHandle) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.binexport2.basicblock.extract_features(fh, bbh)
def get_instructions(self, fh: FunctionHandle, bbh: BBHandle) -> Iterator[InsnHandle]:
bbi: BasicBlockContext = bbh.inner
basic_block: BinExport2.BasicBlock = self.be2.basic_block[bbi.basic_block_index]
for instruction_index, _, instruction_address in self.idx.basic_block_instructions(basic_block):
yield InsnHandle(
address=AbsoluteVirtualAddress(instruction_address),
inner=InstructionContext(instruction_index),
)
def extract_insn_features(
self, fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.binexport2.insn.extract_features(fh, bbh, ih)
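# Example (sketch): driving the extractor by hand. capa normally constructs
# this via its loader; the paths here are illustrative.
#
#     from pathlib import Path
#
#     be2 = BinExport2()
#     be2.ParseFromString(Path("sample.BinExport").read_bytes())
#     buf = Path("sample.exe").read_bytes()  # the original analyzed binary
#     extractor = BinExport2FeatureExtractor(be2, buf)
#     for feature, addr in extractor.extract_global_features():
#         print(feature, addr)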

View File

@@ -1,80 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import io
import logging
from typing import Tuple, Iterator
import pefile
from elftools.elf.elffile import ELFFile
import capa.features.common
import capa.features.extractors.common
import capa.features.extractors.pefile
import capa.features.extractors.elffile
from capa.features.common import Feature
from capa.features.address import Address
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger(__name__)
def extract_file_export_names(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
if buf.startswith(capa.features.extractors.common.MATCH_PE):
pe: pefile.PE = pefile.PE(data=buf)
yield from capa.features.extractors.pefile.extract_file_export_names(pe)
elif buf.startswith(capa.features.extractors.common.MATCH_ELF):
elf: ELFFile = ELFFile(io.BytesIO(buf))
yield from capa.features.extractors.elffile.extract_file_export_names(elf)
else:
logger.warning("unsupported format")
def extract_file_import_names(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
if buf.startswith(capa.features.extractors.common.MATCH_PE):
pe: pefile.PE = pefile.PE(data=buf)
yield from capa.features.extractors.pefile.extract_file_import_names(pe)
elif buf.startswith(capa.features.extractors.common.MATCH_ELF):
elf: ELFFile = ELFFile(io.BytesIO(buf))
yield from capa.features.extractors.elffile.extract_file_import_names(elf)
else:
logger.warning("unsupported format")
def extract_file_section_names(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
if buf.startswith(capa.features.extractors.common.MATCH_PE):
pe: pefile.PE = pefile.PE(data=buf)
yield from capa.features.extractors.pefile.extract_file_section_names(pe)
elif buf.startswith(capa.features.extractors.common.MATCH_ELF):
elf: ELFFile = ELFFile(io.BytesIO(buf))
yield from capa.features.extractors.elffile.extract_file_section_names(elf)
else:
logger.warning("unsupported format")
def extract_file_strings(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.common.extract_file_strings(buf)
def extract_file_format(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.common.extract_format(buf)
def extract_features(be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
"""extract file features"""
for file_handler in FILE_HANDLERS:
for feature, addr in file_handler(be2, buf):
yield feature, addr
FILE_HANDLERS = (
extract_file_export_names,
extract_file_import_names,
extract_file_strings,
extract_file_section_names,
extract_file_format,
)

View File

@@ -1,72 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from typing import List, Tuple, Iterator
from capa.features.file import FunctionName
from capa.features.common import Feature, Characteristic
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.extractors import loops
from capa.features.extractors.binexport2 import BinExport2Index, FunctionContext
from capa.features.extractors.base_extractor import FunctionHandle
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
def extract_function_calls_to(fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
flow_graph_index: int = fhi.flow_graph_index
flow_graph_address: int = idx.flow_graph_address_by_index[flow_graph_index]
vertex_index: int = idx.vertex_index_by_address[flow_graph_address]
for caller_index in idx.callers_by_vertex_index[vertex_index]:
caller: BinExport2.CallGraph.Vertex = be2.call_graph.vertex[caller_index]
caller_address: int = caller.address
yield Characteristic("calls to"), AbsoluteVirtualAddress(caller_address)
def extract_function_loop(fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
be2: BinExport2 = fhi.ctx.be2
flow_graph_index: int = fhi.flow_graph_index
flow_graph: BinExport2.FlowGraph = be2.flow_graph[flow_graph_index]
edges: List[Tuple[int, int]] = []
for edge in flow_graph.edge:
edges.append((edge.source_basic_block_index, edge.target_basic_block_index))
if loops.has_loop(edges):
yield Characteristic("loop"), fh.address
def extract_function_name(fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
flow_graph_index: int = fhi.flow_graph_index
flow_graph_address: int = idx.flow_graph_address_by_index[flow_graph_index]
vertex_index: int = idx.vertex_index_by_address[flow_graph_address]
vertex: BinExport2.CallGraph.Vertex = be2.call_graph.vertex[vertex_index]
if vertex.HasField("mangled_name"):
yield FunctionName(vertex.mangled_name), fh.address
def extract_features(fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
for func_handler in FUNCTION_HANDLERS:
for feature, addr in func_handler(fh):
yield feature, addr
FUNCTION_HANDLERS = (extract_function_calls_to, extract_function_loop, extract_function_name)
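# Example (sketch): the edge list handed to loops.has_loop is just pairs of
# basic block indices; a back edge such as 2 -> 0 below is what yields the
# "loop" characteristic.
#
#     edges = [(0, 1), (1, 2), (2, 0)]
#     loops.has_loop(edges)  # True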

View File

@@ -1,650 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import re
from typing import Set, Dict, List, Tuple, Union, Iterator, Optional
from collections import defaultdict
from dataclasses import dataclass
import capa.features.extractors.helpers
import capa.features.extractors.binexport2.helpers
from capa.features.common import ARCH_I386, ARCH_AMD64, ARCH_AARCH64
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
HAS_ARCH32 = {ARCH_I386}
HAS_ARCH64 = {ARCH_AARCH64, ARCH_AMD64}
HAS_ARCH_INTEL = {ARCH_I386, ARCH_AMD64}
HAS_ARCH_ARM = {ARCH_AARCH64}
def mask_immediate(arch: Set[str], immediate: int) -> int:
if arch & HAS_ARCH64:
immediate &= 0xFFFFFFFFFFFFFFFF
elif arch & HAS_ARCH32:
immediate &= 0xFFFFFFFF
return immediate
def twos_complement(arch: Set[str], immediate: int, default: Optional[int] = None) -> int:
if default is not None:
return capa.features.extractors.helpers.twos_complement(immediate, default)
elif arch & HAS_ARCH64:
return capa.features.extractors.helpers.twos_complement(immediate, 64)
elif arch & HAS_ARCH32:
return capa.features.extractors.helpers.twos_complement(immediate, 32)
return immediate
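# Example: on AArch64 an all-ones immediate masks to the full 64-bit value,
# and interpreting that value as signed recovers -1.
#
#     mask_immediate({ARCH_AARCH64}, -1)                   # 0xFFFFFFFFFFFFFFFF
#     twos_complement({ARCH_AARCH64}, 0xFFFFFFFFFFFFFFFF)  # -1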
def is_address_mapped(be2: BinExport2, address: int) -> bool:
"""return True if the given address is mapped"""
sections_with_perms: Iterator[BinExport2.Section] = filter(lambda s: s.flag_r or s.flag_w or s.flag_x, be2.section)
return any(section.address <= address < section.address + section.size for section in sections_with_perms)
def is_vertex_type(vertex: BinExport2.CallGraph.Vertex, type_: BinExport2.CallGraph.Vertex.Type.ValueType) -> bool:
return vertex.HasField("type") and vertex.type == type_
# internal to `build_expression_tree`
# this is unstable: it is subject to change, so don't rely on it!
def _prune_expression_tree_empty_shifts(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
tree_index: int,
):
expression_index = operand.expression_index[tree_index]
expression = be2.expression[expression_index]
children_tree_indexes: List[int] = expression_tree[tree_index]
if expression.type == BinExport2.Expression.OPERATOR:
if len(children_tree_indexes) == 0 and expression.symbol in ("lsl", "lsr"):
# Ghidra may emit superfluous lsl nodes with no children.
# https://github.com/mandiant/capa/pull/2340/files#r1750003919
# Which is maybe: https://github.com/NationalSecurityAgency/ghidra/issues/6821#issuecomment-2295394697
#
# It seems to behave as if the shift weren't there (a shift by #0),
# so we want to remove references to this node from any parent nodes.
for tree_node in expression_tree:
if tree_index in tree_node:
tree_node.remove(tree_index)
return
for child_tree_index in children_tree_indexes:
_prune_expression_tree_empty_shifts(be2, operand, expression_tree, child_tree_index)
# internal to `build_expression_tree`
# this is unstable: it is subject to change, so don't rely on it!
def _prune_expression_tree_empty_commas(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
tree_index: int,
):
expression_index = operand.expression_index[tree_index]
expression = be2.expression[expression_index]
children_tree_indexes: List[int] = expression_tree[tree_index]
if expression.type == BinExport2.Expression.OPERATOR:
if len(children_tree_indexes) == 1 and expression.symbol == ",":
# Due to the above pruning of empty LSL or LSR expressions,
# the parents might need to be fixed up.
#
# Specifically, if the pruned node was part of a comma list with two children,
# now there's only a single child, which renders as an extra comma,
# so we replace references to the comma node with the immediate child.
#
# A more correct way of doing this might be to walk up the parents and do fixups,
# but I'm not quite sure how to do this yet. Just do two passes right now.
child = children_tree_indexes[0]
for tree_node in expression_tree:
if tree_index in tree_node:
tree_node[tree_node.index(tree_index)] = child
return
for child_tree_index in children_tree_indexes:
_prune_expression_tree_empty_commas(be2, operand, expression_tree, child_tree_index)
# internal to `build_expression_tree`
# this is unstable: it is subject to change, so don't rely on it!
def _prune_expression_tree(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
):
_prune_expression_tree_empty_shifts(be2, operand, expression_tree, 0)
_prune_expression_tree_empty_commas(be2, operand, expression_tree, 0)
# this is unstable: it is subject to change, so don't rely on it!
def _build_expression_tree(
be2: BinExport2,
operand: BinExport2.Operand,
) -> List[List[int]]:
# The reconstructed expression tree layout, linking parent nodes to their children.
#
# There is one list of integers for each expression in the operand.
# These integers are indexes of other expressions in the same operand,
# which are the children of that expression.
#
# So:
#
# [ [1, 3], [2], [], [4], [5], []]
#
# means the first expression has two children, at index 1 and 3,
# and the tree looks like:
#
# 0
# / \
# 1 3
# | |
# 2 4
# |
# 5
#
# Remember, these are the indices into the entries in operand.expression_index.
if len(operand.expression_index) == 0:
# Ghidra bug where empty operands (no expressions) may
# exist (see https://github.com/NationalSecurityAgency/ghidra/issues/6817)
return []
tree: List[List[int]] = []
for i, expression_index in enumerate(operand.expression_index):
children = []
# scan all subsequent expressions, looking for those that have parent_index == current.expression_index
for j, candidate_index in enumerate(operand.expression_index[i + 1 :]):
candidate = be2.expression[candidate_index]
if candidate.parent_index == expression_index:
children.append(i + j + 1)
tree.append(children)
# two passes: pruning empty shifts can leave single-child commas behind
_prune_expression_tree(be2, operand, tree)
_prune_expression_tree(be2, operand, tree)
return tree
def _fill_operand_expression_list(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
tree_index: int,
expression_list: List[BinExport2.Expression],
):
"""
Walk the given expression tree and collect the expression nodes in-order.
"""
expression_index = operand.expression_index[tree_index]
expression = be2.expression[expression_index]
children_tree_indexes: List[int] = expression_tree[tree_index]
if expression.type == BinExport2.Expression.REGISTER:
assert len(children_tree_indexes) == 0
expression_list.append(expression)
return
elif expression.type == BinExport2.Expression.SYMBOL:
assert len(children_tree_indexes) <= 1
expression_list.append(expression)
if len(children_tree_indexes) == 0:
return
elif len(children_tree_indexes) == 1:
# like: v
# from: mov v0.D[0x1], x9
#
#       v
#       |
#       0
#       |
#       .
#       |
#       D
child_index = children_tree_indexes[0]
_fill_operand_expression_list(be2, operand, expression_tree, child_index, expression_list)
return
else:
raise NotImplementedError(len(children_tree_indexes))
elif expression.type == BinExport2.Expression.IMMEDIATE_INT:
assert len(children_tree_indexes) == 0
expression_list.append(expression)
return
elif expression.type == BinExport2.Expression.SIZE_PREFIX:
# like: b4
#
# We might want to use this occasionally, such as to disambiguate the
# size of MOVs into/out of memory. But I'm not sure when/where we need that yet.
#
# IDA spams this size prefix hint *everywhere*, so we can't rely on the exporter
# to provide it only when necessary.
assert len(children_tree_indexes) == 1
child_index = children_tree_indexes[0]
_fill_operand_expression_list(be2, operand, expression_tree, child_index, expression_list)
return
elif expression.type == BinExport2.Expression.OPERATOR:
if len(children_tree_indexes) == 1:
# prefix operator, like "ds:"
expression_list.append(expression)
child_index = children_tree_indexes[0]
_fill_operand_expression_list(be2, operand, expression_tree, child_index, expression_list)
return
elif len(children_tree_indexes) == 2:
# infix operator: like "+" in "ebp+10"
child_a = children_tree_indexes[0]
child_b = children_tree_indexes[1]
_fill_operand_expression_list(be2, operand, expression_tree, child_a, expression_list)
expression_list.append(expression)
_fill_operand_expression_list(be2, operand, expression_tree, child_b, expression_list)
return
elif len(children_tree_indexes) == 3:
# infix operator: like "+" in "ebp+ecx+10"
child_a = children_tree_indexes[0]
child_b = children_tree_indexes[1]
child_c = children_tree_indexes[2]
_fill_operand_expression_list(be2, operand, expression_tree, child_a, expression_list)
expression_list.append(expression)
_fill_operand_expression_list(be2, operand, expression_tree, child_b, expression_list)
expression_list.append(expression)
_fill_operand_expression_list(be2, operand, expression_tree, child_c, expression_list)
return
else:
raise NotImplementedError(len(children_tree_indexes))
elif expression.type == BinExport2.Expression.DEREFERENCE:
assert len(children_tree_indexes) == 1
expression_list.append(expression)
child_index = children_tree_indexes[0]
_fill_operand_expression_list(be2, operand, expression_tree, child_index, expression_list)
return
elif expression.type == BinExport2.Expression.IMMEDIATE_FLOAT:
raise NotImplementedError(expression.type)
else:
raise NotImplementedError(expression.type)
def get_operand_expressions(be2: BinExport2, op: BinExport2.Operand) -> List[BinExport2.Expression]:
tree = _build_expression_tree(be2, op)
expressions: List[BinExport2.Expression] = []
_fill_operand_expression_list(be2, op, tree, 0, expressions)
return expressions
def get_operand_register_expression(be2: BinExport2, operand: BinExport2.Operand) -> Optional[BinExport2.Expression]:
if len(operand.expression_index) == 1:
expression: BinExport2.Expression = be2.expression[operand.expression_index[0]]
if expression.type == BinExport2.Expression.REGISTER:
return expression
return None
def get_operand_immediate_expression(be2: BinExport2, operand: BinExport2.Operand) -> Optional[BinExport2.Expression]:
if len(operand.expression_index) == 1:
# - type: IMMEDIATE_INT
# immediate: 20588728364
# parent_index: 0
expression: BinExport2.Expression = be2.expression[operand.expression_index[0]]
if expression.type == BinExport2.Expression.IMMEDIATE_INT:
return expression
elif len(operand.expression_index) == 2:
# from IDA, which provides a size hint for every operand,
# we get the following pattern for immediate constants:
#
# - type: SIZE_PREFIX
# symbol: "b8"
# - type: IMMEDIATE_INT
# immediate: 20588728364
# parent_index: 0
expression0: BinExport2.Expression = be2.expression[operand.expression_index[0]]
expression1: BinExport2.Expression = be2.expression[operand.expression_index[1]]
if expression0.type == BinExport2.Expression.SIZE_PREFIX:
if expression1.type == BinExport2.Expression.IMMEDIATE_INT:
return expression1
return None
def get_instruction_mnemonic(be2: BinExport2, instruction: BinExport2.Instruction) -> str:
return be2.mnemonic[instruction.mnemonic_index].name.lower()
def get_instruction_operands(be2: BinExport2, instruction: BinExport2.Instruction) -> List[BinExport2.Operand]:
return [be2.operand[operand_index] for operand_index in instruction.operand_index]
def split_with_delimiters(s: str, delimiters: Tuple[str, ...]) -> Iterator[str]:
"""
Splits a string by any of the provided delimiter characters,
including the delimiters in the results.
Args:
s: The string to split.
delimiters: The characters to use as delimiters.
"""
start = 0
for i, char in enumerate(s):
if char in delimiters:
yield s[start:i]
yield char
start = i + 1
if start < len(s):
yield s[start:]
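# Example: delimiters are kept in the output, so an operand body like
# "reg+reg*4" round-trips losslessly.
#
#     list(split_with_delimiters("reg+reg*4", ("+", "*")))
#     # ['reg', '+', 'reg', '*', '4']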
BinExport2OperandPattern = Union[str, Tuple[str, ...]]
@dataclass
class BinExport2InstructionPattern:
"""
This describes a way to match disassembled instructions, with mnemonics and operands.
You can specify constraints on the instruction, via:
- the mnemonics, like "mov",
- number of operands, and
- format of each operand, "[reg, reg, #int]".
During matching, you can also capture a single element, to see its concrete value.
For example, given the pattern:
mov reg0, #int0 ; capture int0
and the instruction:
mov eax, 1
Then the capture will contain the immediate integer 1.
This matcher uses the BinExport2 data layout under the hood.
"""
mnemonics: Tuple[str, ...]
operands: Tuple[Union[str, BinExport2OperandPattern], ...]
capture: Optional[str]
@classmethod
def from_str(cls, query: str):
"""
Parse a pattern string into a Pattern instance.
The supported syntax is like this:
br reg
br reg ; capture reg
br reg(stack) ; capture reg
br reg(not-stack) ; capture reg
mov reg0, reg1 ; capture reg0
adrp reg, #int ; capture #int
add reg, reg, #int ; capture #int
ldr reg0, [reg1] ; capture reg1
ldr|str reg, [reg, #int] ; capture #int
ldr|str reg, [reg(stack), #int] ; capture #int
ldr|str reg, [reg(not-stack), #int] ; capture #int
ldr|str reg, [reg, #int]! ; capture #int
ldr|str reg, [reg], #int ; capture #int
ldp|stp reg, reg, [reg, #int] ; capture #int
ldp|stp reg, reg, [reg, #int]! ; capture #int
ldp|stp reg, reg, [reg], #int ; capture #int
"""
#
# The implementation of the parser here is obviously ugly.
# It's handwritten and probably fragile. But since we don't
# expect this to be widely used, it's probably ok.
# Don't hesitate to rewrite this if it becomes more important.
#
# Note that this doesn't have to be very performant.
# We expect these patterns to be parsed once upfront and then reused
# (globally at the module level?) rather than within any loop.
#
pattern, _, comment = query.strip().partition(";")
# we don't support fs: yet
assert ":" not in pattern
# from "capture #int" to "#int"
if comment:
comment = comment.strip()
assert comment.startswith("capture ")
capture = comment[len("capture ") :]
else:
capture = None
# from "ldr|str ..." to ["ldr", "str"]
pattern = pattern.strip()
mnemonic, _, rest = pattern.partition(" ")
mnemonics = mnemonic.split("|")
operands: List[Union[str, Tuple[str, ...]]] = []
while rest:
rest = rest.strip()
if not rest.startswith("["):
# If it's not a dereference, which looks like `[op, op, op, ...]`,
# then it's a simple operand, which we can split by the next comma.
operand, _, rest = rest.partition(", ")
rest = rest.strip()
operands.append(operand)
else:
# This looks like a dereference, something like `[op, op, op, ...]`.
# Since these can't be nested, look for the next ] and then parse backwards.
deref_end = rest.index("]")
try:
deref_end = rest.index(", ", deref_end)
deref_end += len(", ")
except ValueError:
deref = rest
rest = ""
else:
deref = rest[:deref_end]
rest = rest[deref_end:]
rest = rest.strip()
deref = deref.rstrip(" ")
deref = deref.rstrip(",")
# like: [reg, #int]!
has_postindex_writeback = deref.endswith("!")
deref = deref.rstrip("!")
deref = deref.rstrip("]")
deref = deref.lstrip("[")
parts = tuple(split_with_delimiters(deref, (",", "+", "*")))
parts = tuple(s.strip() for s in parts)
# emit operands in this order to match
# how BinExport2 expressions are flattened
# by get_operand_expressions
if has_postindex_writeback:
operands.append(("!", "[") + parts)
else:
operands.append(("[",) + parts)
for operand in operands: # type: ignore
# Try to ensure we've parsed the operands correctly.
# This is just sanity checking.
for o in (operand,) if isinstance(operand, str) else operand:
# operands can look like:
# - reg
# - reg0
# - reg(stack)
# - reg0(stack)
# - reg(not-stack)
# - reg0(not-stack)
# - #int
# - #int0
# and a limited set of supported operators.
# use an inline regex so that it's easy to read; not perf critical.
assert re.match(r"^(reg|#int)[0-9]?(\(stack\)|\(not-stack\))?$", o) or o in ("[", ",", "!", "+", "*")
return cls(tuple(mnemonics), tuple(operands), capture)
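# Example (sketch): parsing one pattern and inspecting the parsed fields.
#
#     pattern = BinExport2InstructionPattern.from_str(
#         "ldr|str reg, [reg, #int] ; capture #int"
#     )
#     pattern.mnemonics  # ("ldr", "str")
#     pattern.operands   # ("reg", ("[", "reg", ",", "#int"))
#     pattern.capture    # "#int"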
@dataclass
class MatchResult:
operand_index: int
expression_index: int
expression: BinExport2.Expression
def match(
self, mnemonic: str, operand_expressions: List[List[BinExport2.Expression]]
) -> Optional["BinExport2InstructionPattern.MatchResult"]:
"""
Match the given BinExport2 data against this pattern.
The BinExport2 expression tree must have been flattened, such as with
capa.features.extractors.binexport2.helpers.get_operand_expressions.
If there's a match, the captured Expression instance is returned.
Otherwise, you get None back.
"""
if mnemonic not in self.mnemonics:
return None
if len(self.operands) != len(operand_expressions):
return None
captured = None
for operand_index, found_expressions in enumerate(operand_expressions):
wanted_expressions = self.operands[operand_index]
# from `"reg"` to `("reg", )`
if isinstance(wanted_expressions, str):
wanted_expressions = (wanted_expressions,)
assert isinstance(wanted_expressions, tuple)
if len(wanted_expressions) != len(found_expressions):
return None
for expression_index, (wanted_expression, found_expression) in enumerate(
zip(wanted_expressions, found_expressions)
):
if wanted_expression.startswith("reg"):
if found_expression.type != BinExport2.Expression.REGISTER:
return None
if wanted_expression.endswith(")"):
if wanted_expression.endswith("(not-stack)"):
# intel 64: rsp, rbp
# intel 32: esp, ebp
# intel 16: sp, bp
# arm: sp
register_name = found_expression.symbol.lower()
if register_name in ("rsp", "esp", "sp", "rbp", "ebp", "bp"):
return None
elif wanted_expression.endswith("(stack)"):
register_name = found_expression.symbol.lower()
if register_name not in ("rsp", "esp", "sp", "rbp", "ebp", "bp"):
return None
else:
raise ValueError("unexpected expression suffix", wanted_expression)
if self.capture == wanted_expression:
captured = BinExport2InstructionPattern.MatchResult(
operand_index, expression_index, found_expression
)
elif wanted_expression.startswith("#int"):
if found_expression.type != BinExport2.Expression.IMMEDIATE_INT:
return None
if self.capture == wanted_expression:
captured = BinExport2InstructionPattern.MatchResult(
operand_index, expression_index, found_expression
)
elif wanted_expression == "[":
if found_expression.type != BinExport2.Expression.DEREFERENCE:
return None
elif wanted_expression in (",", "!", "+", "*"):
if found_expression.type != BinExport2.Expression.OPERATOR:
return None
if found_expression.symbol != wanted_expression:
return None
else:
raise ValueError(found_expression)
if captured:
return captured
else:
# There were no captures, so
# return an arbitrary non-None expression.
return BinExport2InstructionPattern.MatchResult(operand_index, expression_index, found_expression)
class BinExport2InstructionPatternMatcher:
"""Index and match a collection of instruction patterns."""
def __init__(self, queries: List[BinExport2InstructionPattern]):
self.queries = queries
# shard the patterns by (mnemonic, #operands)
self._index: Dict[Tuple[str, int], List[BinExport2InstructionPattern]] = defaultdict(list)
for query in queries:
for mnemonic in query.mnemonics:
self._index[(mnemonic.lower(), len(query.operands))].append(query)
@classmethod
def from_str(cls, patterns: str):
return cls(
[
BinExport2InstructionPattern.from_str(line)
for line in filter(
    # skip blank lines and comment lines in the pattern listing
    lambda line: bool(line) and not line.startswith("#"),
    (line.strip() for line in patterns.split("\n")),
)
]
)
def match(
self, mnemonic: str, operand_expressions: List[List[BinExport2.Expression]]
) -> Optional[BinExport2InstructionPattern.MatchResult]:
queries = self._index.get((mnemonic.lower(), len(operand_expressions)), [])
for query in queries:
captured = query.match(mnemonic.lower(), operand_expressions)
if captured:
return captured
return None
def match_with_be2(
self, be2: BinExport2, instruction_index: int
) -> Optional[BinExport2InstructionPattern.MatchResult]:
instruction: BinExport2.Instruction = be2.instruction[instruction_index]
mnemonic: str = get_instruction_mnemonic(be2, instruction)
if (mnemonic.lower(), len(instruction.operand_index)) not in self._index:
# verify that we might have a hit before we realize the operand expression list
return None
operands = []
for operand_index in instruction.operand_index:
operands.append(get_operand_expressions(be2, be2.operand[operand_index]))
return self.match(mnemonic, operands)
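# Example (sketch): index patterns once, then match per instruction.
# `be2` is assumed to be a parsed BinExport2 message for an AArch64 sample.
#
#     matcher = BinExport2InstructionPatternMatcher.from_str(
#         """
#         # AArch64 stack accesses
#         ldr|str reg, [reg(stack), #int] ; capture #int
#         add reg, reg, #int              ; capture #int
#         """
#     )
#     for i in range(len(be2.instruction)):
#         result = matcher.match_with_be2(be2, i)
#         if result is not None:
#             print(i, result.expression.immediate)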

View File

@@ -1,254 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import List, Tuple, Iterator
import capa.features.extractors.helpers
import capa.features.extractors.strings
import capa.features.extractors.binexport2.helpers
import capa.features.extractors.binexport2.arch.arm.insn
import capa.features.extractors.binexport2.arch.intel.insn
from capa.features.insn import API, Mnemonic
from capa.features.common import Bytes, String, Feature, Characteristic
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.extractors.binexport2 import (
AddressSpace,
AnalysisContext,
BinExport2Index,
FunctionContext,
ReadMemoryError,
BinExport2Analysis,
InstructionContext,
)
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle
from capa.features.extractors.binexport2.helpers import HAS_ARCH_ARM, HAS_ARCH_INTEL
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger(__name__)
def extract_insn_api_features(fh: FunctionHandle, _bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
be2_index: BinExport2Index = fhi.ctx.idx
be2_analysis: BinExport2Analysis = fhi.ctx.analysis
insn: BinExport2.Instruction = be2.instruction[ii.instruction_index]
for addr in insn.call_target:
addr = be2_analysis.thunks.get(addr, addr)
if addr not in be2_index.vertex_index_by_address:
# disassembler did not define function at address
logger.debug("0x%x is not a vertex", addr)
continue
vertex_idx: int = be2_index.vertex_index_by_address[addr]
vertex: BinExport2.CallGraph.Vertex = be2.call_graph.vertex[vertex_idx]
if not capa.features.extractors.binexport2.helpers.is_vertex_type(
vertex, BinExport2.CallGraph.Vertex.Type.IMPORTED
):
continue
if not vertex.HasField("mangled_name"):
logger.debug("vertex %d does not have mangled_name", vertex_idx)
continue
api_name: str = vertex.mangled_name
for name in capa.features.extractors.helpers.generate_symbols("", api_name):
yield API(name), ih.address
def extract_insn_number_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
if fhi.arch & HAS_ARCH_INTEL:
yield from capa.features.extractors.binexport2.arch.intel.insn.extract_insn_number_features(fh, bbh, ih)
elif fhi.arch & HAS_ARCH_ARM:
yield from capa.features.extractors.binexport2.arch.arm.insn.extract_insn_number_features(fh, bbh, ih)
def extract_insn_bytes_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
ctx: AnalysisContext = fhi.ctx
be2: BinExport2 = ctx.be2
idx: BinExport2Index = ctx.idx
address_space: AddressSpace = ctx.address_space
instruction_index: int = ii.instruction_index
if instruction_index in idx.string_reference_index_by_source_instruction_index:
# disassembler already identified string reference from instruction
return
reference_addresses: List[int] = []
if instruction_index in idx.data_reference_index_by_source_instruction_index:
for data_reference_index in idx.data_reference_index_by_source_instruction_index[instruction_index]:
data_reference: BinExport2.DataReference = be2.data_reference[data_reference_index]
data_reference_address: int = data_reference.address
if data_reference_address in idx.insn_address_by_index:
# appears to be code
continue
reference_addresses.append(data_reference_address)
for reference_address in reference_addresses:
try:
# if at the end of a segment, this read may overrun into unmapped memory.
buf: bytes = address_space.read_memory(reference_address, 0x100)
except ReadMemoryError:
logger.debug("failed to read memory: 0x%x", reference_address)
continue
if capa.features.extractors.helpers.all_zeros(buf):
continue
is_string: bool = False
# note: we *always* break after the first iteration
for s in capa.features.extractors.strings.extract_ascii_strings(buf):
if s.offset != 0:
break
yield String(s.s), ih.address
is_string = True
break
# note: we *always* break after the first iteration
for s in capa.features.extractors.strings.extract_unicode_strings(buf):
if s.offset != 0:
break
yield String(s.s), ih.address
is_string = True
break
if not is_string:
yield Bytes(buf), ih.address
def extract_insn_string_features(
fh: FunctionHandle, _bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
instruction_index: int = ii.instruction_index
if instruction_index in idx.string_reference_index_by_source_instruction_index:
for string_reference_index in idx.string_reference_index_by_source_instruction_index[instruction_index]:
string_reference: BinExport2.Reference = be2.string_reference[string_reference_index]
string_index: int = string_reference.string_table_index
string: str = be2.string_table[string_index]
yield String(string), ih.address
def extract_insn_offset_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
if fhi.arch & HAS_ARCH_INTEL:
yield from capa.features.extractors.binexport2.arch.intel.insn.extract_insn_offset_features(fh, bbh, ih)
elif fhi.arch & HAS_ARCH_ARM:
yield from capa.features.extractors.binexport2.arch.arm.insn.extract_insn_offset_features(fh, bbh, ih)
def extract_insn_nzxor_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
if fhi.arch & HAS_ARCH_INTEL:
yield from capa.features.extractors.binexport2.arch.intel.insn.extract_insn_nzxor_characteristic_features(
fh, bbh, ih
)
elif fhi.arch & HAS_ARCH_ARM:
yield from capa.features.extractors.binexport2.arch.arm.insn.extract_insn_nzxor_characteristic_features(
fh, bbh, ih
)
def extract_insn_mnemonic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
instruction: BinExport2.Instruction = be2.instruction[ii.instruction_index]
mnemonic: BinExport2.Mnemonic = be2.mnemonic[instruction.mnemonic_index]
mnemonic_name: str = mnemonic.name.lower()
yield Mnemonic(mnemonic_name), ih.address
def extract_function_calls_from(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
"""extract functions calls from features
most relevant at the function scope;
however, its most efficient to extract at the instruction scope.
"""
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
instruction: BinExport2.Instruction = be2.instruction[ii.instruction_index]
for call_target_address in instruction.call_target:
addr: AbsoluteVirtualAddress = AbsoluteVirtualAddress(call_target_address)
yield Characteristic("calls from"), addr
if fh.address == addr:
yield Characteristic("recursive call"), addr
def extract_function_indirect_call_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
if fhi.arch & HAS_ARCH_INTEL:
yield from capa.features.extractors.binexport2.arch.intel.insn.extract_function_indirect_call_characteristic_features(
fh, bbh, ih
)
elif fhi.arch & HAS_ARCH_ARM:
yield from capa.features.extractors.binexport2.arch.arm.insn.extract_function_indirect_call_characteristic_features(
fh, bbh, ih
)
def extract_features(f: FunctionHandle, bbh: BBHandle, insn: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
"""extract instruction features"""
for inst_handler in INSTRUCTION_HANDLERS:
for feature, ea in inst_handler(f, bbh, insn):
yield feature, ea
INSTRUCTION_HANDLERS = (
extract_insn_api_features,
extract_insn_number_features,
extract_insn_bytes_features,
extract_insn_string_features,
extract_insn_offset_features,
extract_insn_nzxor_characteristic_features,
extract_insn_mnemonic_features,
extract_function_calls_from,
extract_function_indirect_call_characteristic_features,
)

View File

@@ -7,15 +7,17 @@
# See the License for the specific language governing permissions and limitations under the License.
import string
import struct
from typing import Tuple, Iterator
from binaryninja import Function
from binaryninja import Function, Settings
from binaryninja import BasicBlock as BinjaBasicBlock
from binaryninja import (
BinaryView,
SymbolType,
RegisterValueType,
VariableSourceType,
MediumLevelILSetVar,
MediumLevelILOperation,
MediumLevelILBasicBlock,
MediumLevelILInstruction,
@@ -27,6 +29,11 @@ from capa.features.basicblock import BasicBlock
from capa.features.extractors.helpers import MIN_STACKSTRING_LEN
from capa.features.extractors.base_extractor import BBHandle, FunctionHandle
use_const_outline: bool = False
settings: Settings = Settings()
if settings.contains("analysis.outlining.builtins") and settings.get_bool("analysis.outlining.builtins"):
use_const_outline = True
def get_printable_len_ascii(s: bytes) -> int:
"""Return string length if all operand bytes are ascii or utf16-le printable"""
@@ -58,7 +65,7 @@ def get_stack_string_len(f: Function, il: MediumLevelILInstruction) -> int:
addr = target.value.value
sym = bv.get_symbol_at(addr)
if not sym or sym.type not in [SymbolType.LibraryFunctionSymbol, SymbolType.SymbolicFunctionSymbol]:
if not sym or sym.type != SymbolType.LibraryFunctionSymbol:
return 0
if sym.name not in ["__builtin_strncpy", "__builtin_strcpy", "__builtin_wcscpy"]:
@@ -84,6 +91,52 @@ def get_stack_string_len(f: Function, il: MediumLevelILInstruction) -> int:
return max(get_printable_len_ascii(bytes(s)), get_printable_len_wide(bytes(s)))
def get_printable_len(il: MediumLevelILSetVar) -> int:
"""Return string length if all operand bytes are ascii or utf16-le printable"""
width = il.dest.type.width
value = il.src.value.value
if width == 1:
chars = struct.pack("<B", value & 0xFF)
elif width == 2:
chars = struct.pack("<H", value & 0xFFFF)
elif width == 4:
chars = struct.pack("<I", value & 0xFFFFFFFF)
elif width == 8:
chars = struct.pack("<Q", value & 0xFFFFFFFFFFFFFFFF)
else:
return 0
def is_printable_ascii(chars_: bytes):
return all(c < 127 and chr(c) in string.printable for c in chars_)
def is_printable_utf16le(chars_: bytes):
if all(c == 0x00 for c in chars_[1::2]):
return is_printable_ascii(chars_[::2])
return False
if is_printable_ascii(chars):
return width
if is_printable_utf16le(chars):
return width // 2
return 0
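A worked example of the packing and printability checks above, assuming a hypothetical 4-byte assignment of 0x00410041:

    import string
    import struct

    chars = struct.pack("<I", 0x00410041)       # b"A\x00A\x00"
    assert all(c == 0x00 for c in chars[1::2])  # odd bytes are zero: looks like utf16-le
    assert all(c < 127 and chr(c) in string.printable for c in chars[::2])
    # get_printable_len would therefore return width // 2 == 2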
def is_mov_imm_to_stack(il: MediumLevelILInstruction) -> bool:
"""verify instruction moves immediate onto stack"""
if il.operation != MediumLevelILOperation.MLIL_SET_VAR:
return False
if il.src.operation != MediumLevelILOperation.MLIL_CONST:
return False
if il.dest.source_type != VariableSourceType.StackVariableSourceType:
return False
return True
def bb_contains_stackstring(f: Function, bb: MediumLevelILBasicBlock) -> bool:
"""check basic block for stackstring indicators
@@ -91,10 +144,14 @@ def bb_contains_stackstring(f: Function, bb: MediumLevelILBasicBlock) -> bool:
"""
count = 0
for il in bb:
count += get_stack_string_len(f, il)
if count > MIN_STACKSTRING_LEN:
return True
if use_const_outline:
count += get_stack_string_len(f, il)
else:
if is_mov_imm_to_stack(il):
count += get_printable_len(il)
if count > MIN_STACKSTRING_LEN:
return True
return False

View File

@@ -28,7 +28,7 @@ from capa.features.extractors.base_extractor import (
class BinjaFeatureExtractor(StaticFeatureExtractor):
def __init__(self, bv: binja.BinaryView):
super().__init__(hashes=SampleHashes.from_bytes(bv.file.raw.read(0, bv.file.raw.length)))
super().__init__(hashes=SampleHashes.from_bytes(bv.file.raw.read(0, len(bv.file.raw))))
self.bv = bv
self.global_features: List[Tuple[Feature, Address]] = []
self.global_features.extend(capa.features.extractors.binja.file.extract_file_format(self.bv))

View File

@@ -5,6 +5,8 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import struct
from typing import Tuple, Iterator
from binaryninja import Segment, BinaryView, SymbolType, SymbolBinding
@@ -18,40 +20,67 @@ from capa.features.address import NO_ADDRESS, Address, FileOffsetAddress, Absolu
from capa.features.extractors.binja.helpers import read_c_string, unmangle_c_name
def check_segment_for_pe(bv: BinaryView, seg: Segment) -> Iterator[Tuple[Feature, Address]]:
"""check segment for embedded PE"""
start = 0
if bv.view_type == "PE" and seg.start == bv.start:
# If this is the first segment of the binary, skip the first bytes.
# Otherwise, there will always be a matched PE at the start of the binaryview.
def check_segment_for_pe(bv: BinaryView, seg: Segment) -> Iterator[Tuple[int, int]]:
"""check segment for embedded PE
adapted for binja from:
https://github.com/vivisect/vivisect/blob/7be4037b1cecc4551b397f840405a1fc606f9b53/PE/carve.py#L19
"""
mz_xor = [
(
capa.features.extractors.helpers.xor_static(b"MZ", i),
capa.features.extractors.helpers.xor_static(b"PE", i),
i,
)
for i in range(256)
]
todo = []
# If this is the first segment of the binary, skip the first bytes. Otherwise, there will always be a matched
# PE at the start of the binaryview.
start = seg.start
if bv.view_type == "PE" and start == bv.start:
start += 1
buf = bv.read(seg.start, seg.length)
for mzx, pex, i in mz_xor:
for off, _ in bv.find_all_data(start, seg.end, mzx):
todo.append((off, mzx, pex, i))
for offset, _ in capa.features.extractors.helpers.carve_pe(buf, start):
yield Characteristic("embedded pe"), FileOffsetAddress(seg.start + offset)
while len(todo):
off, mzx, pex, i = todo.pop()
# The MZ header has one field we will check: e_lfanew, at offset 0x3C
e_lfanew = off + 0x3C
if seg.end < (e_lfanew + 4):
continue
newoff = struct.unpack("<I", capa.features.extractors.helpers.xor_static(bv.read(e_lfanew, 4), i))[0]
peoff = off + newoff
if seg.end < (peoff + 2):
continue
if bv.read(peoff, 2) == pex:
yield off, i
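A minimal sketch of the XOR helper this carving loop relies on, assuming xor_static simply XORs each byte with a one-byte key:

    def xor_static(data: bytes, key: int) -> bytes:
        # XOR every byte with the same single-byte key
        return bytes(b ^ key for b in data)

    # key 0 matches plaintext headers; the other keys find XOR-encoded PEs
    assert xor_static(b"MZ", 0x00) == b"MZ"
    assert xor_static(xor_static(b"MZ", 0x42), 0x42) == b"MZ"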
def extract_file_embedded_pe(bv: BinaryView) -> Iterator[Tuple[Feature, Address]]:
"""extract embedded PE features"""
for seg in bv.segments:
yield from check_segment_for_pe(bv, seg)
for ea, _ in check_segment_for_pe(bv, seg):
yield Characteristic("embedded pe"), FileOffsetAddress(ea)
def extract_file_export_names(bv: BinaryView) -> Iterator[Tuple[Feature, Address]]:
"""extract function exports"""
for sym in bv.get_symbols_of_type(SymbolType.FunctionSymbol) + bv.get_symbols_of_type(SymbolType.DataSymbol):
for sym in bv.get_symbols_of_type(SymbolType.FunctionSymbol):
if sym.binding in [SymbolBinding.GlobalBinding, SymbolBinding.WeakBinding]:
name = sym.short_name
if name.startswith("__forwarder_name(") and name.endswith(")"):
yield Export(name[17:-1]), AbsoluteVirtualAddress(sym.address)
yield Characteristic("forwarded export"), AbsoluteVirtualAddress(sym.address)
else:
yield Export(name), AbsoluteVirtualAddress(sym.address)
unmangled_name = unmangle_c_name(name)
if name != unmangled_name:
yield Export(unmangled_name), AbsoluteVirtualAddress(sym.address)
yield Export(name), AbsoluteVirtualAddress(sym.address)
unmangled_name = unmangle_c_name(name)
if name != unmangled_name:
yield Export(unmangled_name), AbsoluteVirtualAddress(sym.address)
for sym in bv.get_symbols_of_type(SymbolType.DataSymbol):
if sym.binding not in [SymbolBinding.GlobalBinding]:

View File

@@ -5,175 +5,31 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import os
import sys
import logging
import subprocess
import importlib.util
from typing import Optional
from pathlib import Path
logger = logging.getLogger(__name__)
# When the script gets executed as a standalone executable (via PyInstaller), `import binaryninja` does not work because
# we have excluded the binaryninja module in `pyinstaller.spec`. The trick here is to call the system Python and try
# to find out the path of the binaryninja module that has been installed.
# Note, including the binaryninja module in the `pyinstaller.spec` would not work, since the binaryninja module tries to
# Note, including the binaryninja module in the `pyintaller.spec` would not work, since the binaryninja module tries to
# find the binaryninja core e.g., `libbinaryninjacore.dylib`, using a relative path. And this does not work when the
# binaryninja module is extracted by the PyInstaller.
CODE = r"""
code = r"""
from pathlib import Path
from importlib import util
spec = util.find_spec('binaryninja')
if spec is not None:
if len(spec.submodule_search_locations) > 0:
path = Path(spec.submodule_search_locations[0])
# encode the path with utf8 then convert to hex, make sure it can be read and restored properly
print(str(path.parent).encode('utf8').hex())
path = Path(spec.submodule_search_locations[0])
# encode the path with utf8 then convert to hex, make sure it can be read and restored properly
print(str(path.parent).encode('utf8').hex())
"""
def find_binaryninja_path_via_subprocess() -> Optional[Path]:
raw_output = subprocess.check_output(["python", "-c", CODE]).decode("ascii").strip()
output = bytes.fromhex(raw_output).decode("utf8")
if not output.strip():
return None
return Path(output)
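The hex round-trip keeps non-ascii install paths intact across the subprocess boundary; for example (the path is hypothetical):

    from pathlib import Path

    encoded = str(Path("/opt/binaryninja")).encode("utf8").hex()
    assert bytes.fromhex(encoded).decode("utf8") == "/opt/binaryninja"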
def get_desktop_entry(name: str) -> Optional[Path]:
"""
Find the path for the given XDG Desktop Entry name.
Like:
>>> get_desktop_entry("com.vector35.binaryninja.desktop")
Path("~/.local/share/applications/com.vector35.binaryninja.desktop")
"""
assert sys.platform in ("linux", "linux2")
assert name.endswith(".desktop")
data_dirs = os.environ.get("XDG_DATA_DIRS", "/usr/share") + f":{Path.home()}/.local/share"
for data_dir in data_dirs.split(":"):
applications = Path(data_dir) / "applications"
for application in applications.glob("*.desktop"):
if application.name == name:
return application
return None
def get_binaryninja_path(desktop_entry: Path) -> Optional[Path]:
# from: Exec=/home/wballenthin/software/binaryninja/binaryninja %u
# to: /home/wballenthin/software/binaryninja/
for line in desktop_entry.read_text(encoding="utf-8").splitlines():
if not line.startswith("Exec="):
continue
if not line.endswith("binaryninja %u"):
continue
binaryninja_path = Path(line[len("Exec=") : -len("binaryninja %u")])
if not binaryninja_path.exists():
return None
return binaryninja_path
return None
def validate_binaryninja_path(binaryninja_path: Path) -> bool:
if not binaryninja_path:
return False
module_path = binaryninja_path / "python"
if not module_path.is_dir():
return False
if not (module_path / "binaryninja" / "__init__.py").is_file():
return False
return True
def find_binaryninja() -> Optional[Path]:
binaryninja_path = find_binaryninja_path_via_subprocess()
if not binaryninja_path or not validate_binaryninja_path(binaryninja_path):
if sys.platform == "linux" or sys.platform == "linux2":
# ok
logger.debug("detected OS: linux")
elif sys.platform == "darwin":
logger.warning("unsupported platform to find Binary Ninja: %s", sys.platform)
return False
elif sys.platform == "win32":
logger.warning("unsupported platform to find Binary Ninja: %s", sys.platform)
return False
else:
logger.warning("unsupported platform to find Binary Ninja: %s", sys.platform)
return False
desktop_entry = get_desktop_entry("com.vector35.binaryninja.desktop")
if not desktop_entry:
logger.debug("failed to find Binary Ninja application")
return None
logger.debug("found Binary Ninja application: %s", desktop_entry)
binaryninja_path = get_binaryninja_path(desktop_entry)
if not binaryninja_path:
logger.debug("failed to determine Binary Ninja installation path")
return None
if not validate_binaryninja_path(binaryninja_path):
logger.debug("failed to validate Binary Ninja installation")
return None
logger.debug("found Binary Ninja installation: %s", binaryninja_path)
return binaryninja_path / "python"
def is_binaryninja_installed() -> bool:
"""Is the binaryninja module ready to import?"""
try:
return importlib.util.find_spec("binaryninja") is not None
except ModuleNotFoundError:
return False
def has_binaryninja() -> bool:
if is_binaryninja_installed():
logger.debug("found installed Binary Ninja API")
return True
logger.debug("Binary Ninja API not installed, searching...")
binaryninja_path = find_binaryninja()
if not binaryninja_path:
logger.debug("failed to find Binary Ninja installation")
logger.debug("found Binary Ninja API: %s", binaryninja_path)
return binaryninja_path is not None
def load_binaryninja() -> bool:
try:
import binaryninja
return True
except ImportError:
binaryninja_path = find_binaryninja()
if not binaryninja_path:
return False
sys.path.append(binaryninja_path.absolute().as_posix())
try:
import binaryninja # noqa: F401 unused import
return True
except ImportError:
return False
def find_binja_path() -> Path:
raw_output = subprocess.check_output(["python", "-c", code]).decode("ascii").strip()
return Path(bytes.fromhex(raw_output).decode("utf8"))
if __name__ == "__main__":
print(find_binaryninja_path_via_subprocess())
print(find_binja_path())

View File

@@ -9,7 +9,6 @@
import logging
from typing import Tuple, Iterator
import capa.features.extractors.helpers
from capa.helpers import assert_never
from capa.features.insn import API, Number
from capa.features.common import String, Feature
@@ -51,8 +50,7 @@ def extract_call_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -
else:
assert_never(value)
for name in capa.features.extractors.helpers.generate_symbols("", call.api):
yield API(name), ch.address
yield API(call.api), ch.address
def extract_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:

View File

@@ -128,14 +128,6 @@ class CapeExtractor(DynamicFeatureExtractor):
if cr.info.version not in TESTED_VERSIONS:
logger.warning("CAPE version '%s' not tested/supported yet", cr.info.version)
# TODO(mr-tz): support more file types
# https://github.com/mandiant/capa/issues/1933
if "PE" not in cr.target.file.type:
logger.error(
"capa currently only supports PE target files, this target file's type is: '%s'.\nPlease report this at: https://github.com/mandiant/capa/issues/1933",
cr.target.file.type,
)
# observed in 2.4-CAPE reports from capesandbox.com
if cr.static is None and cr.target.file.pe is not None:
cr.static = Static()

View File

@@ -48,7 +48,7 @@ def extract_format(report: CapeReport) -> Iterator[Tuple[Feature, Address]]:
else:
logger.warning("unknown file format, file command output: %s", report.target.file.type)
raise ValueError(
f"unrecognized file format from the CAPE report; output of file command: {report.target.file.type}"
"unrecognized file format from the CAPE report; output of file command: {report.target.file.type}"
)
@@ -73,7 +73,7 @@ def extract_os(report: CapeReport) -> Iterator[Tuple[Feature, Address]]:
else:
# if the operating system information is missing from the cape report, it's likely a bug
logger.warning("unrecognized OS: %s", file_output)
raise ValueError(f"unrecognized OS from the CAPE report; output of file command: {file_output}")
raise ValueError("unrecognized OS from the CAPE report; output of file command: {file_output}")
else:
# the sample is shellcode
logger.debug("unsupported file format, file command output: %s", file_output)

View File

@@ -46,7 +46,7 @@ class FlexibleModel(BaseModel):
# use this type to indicate that we won't model this data.
# because it's not relevant to our use in capa.
# because its not relevant to our use in capa.
#
# while it's nice to have full coverage of the data shape,
# it can easily change and break our parsing.
@@ -230,7 +230,7 @@ class File(FlexibleModel):
sha1: str
sha256: str
sha512: str
sha3_384: Optional[str] = None
sha3_384: str
ssdeep: str
# unsure why this would ever be "False"
tlsh: Optional[Union[str, bool]] = None
@@ -356,8 +356,8 @@ class Behavior(ExactModel):
anomaly: List[str]
encryptedbuffers: List[EncryptedBuffer]
# these are small objects that describe atomic events,
# like file move, registry access.
# we'll detect the same with our API call analysis.
# like file move, registery access.
# we'll detect the same with our API call analyis.
enhanced: Skip = None
@@ -398,7 +398,7 @@ class CapeReport(FlexibleModel):
behavior: Behavior
# post-processed results: payloads and extracted configs
CAPE: Optional[Union[Cape, List]] = None
CAPE: Optional[Cape] = None
dropped: Optional[List[File]] = None
procdump: Optional[List[ProcessFile]] = None
procmemory: ListTODO

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -45,7 +45,7 @@ MATCH_RESULT = b'{"meta":'
MATCH_JSON_OBJECT = b'{"'
def extract_file_strings(buf: bytes, **kwargs) -> Iterator[Tuple[String, Address]]:
def extract_file_strings(buf, **kwargs) -> Iterator[Tuple[String, Address]]:
"""
extract ASCII and UTF-16 LE strings from file
"""
@@ -56,7 +56,7 @@ def extract_file_strings(buf: bytes, **kwargs) -> Iterator[Tuple[String, Address
yield String(s.s), FileOffsetAddress(s.offset)
def extract_format(buf: bytes) -> Iterator[Tuple[Feature, Address]]:
def extract_format(buf) -> Iterator[Tuple[Feature, Address]]:
if buf.startswith(MATCH_PE):
yield Format(FORMAT_PE), NO_ADDRESS
elif buf.startswith(MATCH_ELF):
@@ -75,7 +75,7 @@ def extract_format(buf: bytes) -> Iterator[Tuple[Feature, Address]]:
# 1. handling a file format (e.g. macho)
#
# for (1), this logic will need to be updated as the format is implemented.
logger.debug("unknown file format: %s", buf[:4].hex())
logger.debug("unsupported file format: %s", binascii.hexlify(buf[:4]).decode("ascii"))
return

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -83,7 +83,7 @@ def read_dotnet_user_string(pe: dnfile.dnPE, token: StringToken) -> Optional[str
return None
try:
user_string: Optional[dnfile.stream.UserString] = pe.net.user_strings.get(token.rid)
user_string: Optional[dnfile.stream.UserString] = pe.net.user_strings.get_us(token.rid)
except UnicodeDecodeError as e:
logger.debug("failed to decode #US stream index 0x%08x (%s)", token.rid, e)
return None
@@ -119,14 +119,14 @@ def get_dotnet_managed_imports(pe: dnfile.dnPE) -> Iterator[DnType]:
access: Optional[str]
# assume .NET imports starting with get_/set_ are used to access a property
member_ref_name: str = str(member_ref.Name)
if member_ref_name.startswith("get_"):
if member_ref.Name.startswith("get_"):
access = FeatureAccess.READ
elif member_ref_name.startswith("set_"):
elif member_ref.Name.startswith("set_"):
access = FeatureAccess.WRITE
else:
access = None
member_ref_name: str = member_ref.Name
if member_ref_name.startswith(("get_", "set_")):
# remove get_/set_ from MemberRef name
member_ref_name = member_ref_name[4:]
@@ -212,7 +212,7 @@ def get_dotnet_managed_methods(pe: dnfile.dnPE) -> Iterator[DnType]:
token: int = calculate_dotnet_token_value(method.table.number, method.row_index)
access: Optional[str] = accessor_map.get(token)
method_name: str = str(method.row.Name)
method_name: str = method.row.Name
if method_name.startswith(("get_", "set_")):
# remove get_/set_
method_name = method_name[4:]
@@ -289,8 +289,8 @@ def get_dotnet_unmanaged_imports(pe: dnfile.dnPE) -> Iterator[DnUnmanagedMethod]
logger.debug("ImplMap[0x%X] ImportScope row is None", rid)
module = ""
else:
module = str(impl_map.ImportScope.row.Name)
method: str = str(impl_map.ImportName)
module = impl_map.ImportScope.row.Name
method: str = impl_map.ImportName
member_forward_table: int
if impl_map.MemberForwarded.table is None:
@@ -320,11 +320,8 @@ def get_dotnet_table_row(pe: dnfile.dnPE, table_index: int, row_index: int) -> O
if row_index - 1 <= 0:
return None
table: Optional[dnfile.base.ClrMetaDataTable] = pe.net.mdtables.tables.get(table_index)
if table is None:
return None
try:
table = pe.net.mdtables.tables.get(table_index, [])
return table[row_index - 1]
except IndexError:
return None
@@ -337,7 +334,7 @@ def resolve_nested_typedef_name(
if index in nested_class_table:
typedef_name = []
name = str(typedef.TypeName)
name = typedef.TypeName
# Append the current typedef name
typedef_name.append(name)
@@ -346,24 +343,24 @@ def resolve_nested_typedef_name(
# Iterate through the typedef table to resolve the nested name
table_row = get_dotnet_table_row(pe, dnfile.mdtable.TypeDef.number, nested_class_table[index])
if table_row is None:
return str(typedef.TypeNamespace), tuple(typedef_name[::-1])
return typedef.TypeNamespace, tuple(typedef_name[::-1])
name = str(table_row.TypeName)
name = table_row.TypeName
typedef_name.append(name)
index = nested_class_table[index]
# Document the root enclosing details
table_row = get_dotnet_table_row(pe, dnfile.mdtable.TypeDef.number, nested_class_table[index])
if table_row is None:
return str(typedef.TypeNamespace), tuple(typedef_name[::-1])
return typedef.TypeNamespace, tuple(typedef_name[::-1])
enclosing_name = str(table_row.TypeName)
enclosing_name = table_row.TypeName
typedef_name.append(enclosing_name)
return str(table_row.TypeNamespace), tuple(typedef_name[::-1])
return table_row.TypeNamespace, tuple(typedef_name[::-1])
else:
return str(typedef.TypeNamespace), (str(typedef.TypeName),)
return typedef.TypeNamespace, (typedef.TypeName,)
def resolve_nested_typeref_name(
@@ -373,29 +370,29 @@ def resolve_nested_typeref_name(
# If the ResolutionScope decodes to a typeRef type then it is nested
if isinstance(typeref.ResolutionScope.table, dnfile.mdtable.TypeRef):
typeref_name = []
name = str(typeref.TypeName)
name = typeref.TypeName
# Not appending the current typeref name to avoid potential duplicate
# Validate index
table_row = get_dotnet_table_row(pe, dnfile.mdtable.TypeRef.number, index)
if table_row is None:
return str(typeref.TypeNamespace), (str(typeref.TypeName),)
return typeref.TypeNamespace, (typeref.TypeName,)
while isinstance(table_row.ResolutionScope.table, dnfile.mdtable.TypeRef):
# Iterate through the typeref table to resolve the nested name
typeref_name.append(name)
name = str(table_row.TypeName)
name = table_row.TypeName
table_row = get_dotnet_table_row(pe, dnfile.mdtable.TypeRef.number, table_row.ResolutionScope.row_index)
if table_row is None:
return str(typeref.TypeNamespace), tuple(typeref_name[::-1])
return typeref.TypeNamespace, tuple(typeref_name[::-1])
# Document the root enclosing details
typeref_name.append(str(table_row.TypeName))
typeref_name.append(table_row.TypeName)
return str(table_row.TypeNamespace), tuple(typeref_name[::-1])
return table_row.TypeNamespace, tuple(typeref_name[::-1])
else:
return str(typeref.TypeNamespace), (str(typeref.TypeName),)
return typeref.TypeNamespace, (typeref.TypeName,)
def get_dotnet_nested_class_table_index(pe: dnfile.dnPE) -> Dict[int, int]:

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -49,8 +49,8 @@ logger = logging.getLogger(__name__)
def extract_file_format(**kwargs) -> Iterator[Tuple[Format, Address]]:
yield Format(FORMAT_DOTNET), NO_ADDRESS
yield Format(FORMAT_PE), NO_ADDRESS
yield Format(FORMAT_DOTNET), NO_ADDRESS
def extract_file_import_names(pe: dnfile.dnPE, **kwargs) -> Iterator[Tuple[Import, Address]]:
@@ -78,12 +78,12 @@ def extract_file_namespace_features(pe: dnfile.dnPE, **kwargs) -> Iterator[Tuple
for _, typedef in iter_dotnet_table(pe, dnfile.mdtable.TypeDef.number):
# emit internal .NET namespaces
assert isinstance(typedef, dnfile.mdtable.TypeDefRow)
namespaces.add(str(typedef.TypeNamespace))
namespaces.add(typedef.TypeNamespace)
for _, typeref in iter_dotnet_table(pe, dnfile.mdtable.TypeRef.number):
# emit external .NET namespaces
assert isinstance(typeref, dnfile.mdtable.TypeRefRow)
namespaces.add(str(typeref.TypeNamespace))
namespaces.add(typeref.TypeNamespace)
# namespaces may be empty, discard
namespaces.discard("")

View File

@@ -1,58 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Tuple, Iterator
import capa.features.extractors.helpers
from capa.features.insn import API, Number
from capa.features.common import String, Feature
from capa.features.address import Address
from capa.features.extractors.base_extractor import CallHandle, ThreadHandle, ProcessHandle
from capa.features.extractors.drakvuf.models import Call
logger = logging.getLogger(__name__)
def extract_call_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:
"""
This method extracts the given call's features (such as API name and arguments),
and returns them as API, Number, and String features.
args:
ph: process handle (for defining the extraction scope)
th: thread handle (for defining the extraction scope)
ch: call handle (for defining the extraction scope)
yields:
Feature, address; where Feature is either: API, Number, or String.
"""
call: Call = ch.inner
# list similar to disassembly: arguments right-to-left, call
for arg_value in reversed(call.arguments.values()):
try:
yield Number(int(arg_value, 0)), ch.address
except ValueError:
# DRAKVUF automatically resolves the contents of memory addresses (e.g. Arg1="0xc6f217efe0:\"ntdll.dll\"").
# For those cases we yield the entire string as is, since yielding the address only would
# likely not provide any matches, and yielding just the memory contents would probably be misleading.
# Yielding the entire string, though, is helpful for an analyst reading the verbose output.
yield String(arg_value), ch.address
for name in capa.features.extractors.helpers.generate_symbols("", call.name):
yield API(name), ch.address
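For example, plain hexadecimal argument values parse cleanly with base 0, while DRAKVUF's resolved-memory strings do not and fall through to the String feature:

    assert int("0x1a", 0) == 26
    try:
        int('0xc6f217efe0:"ntdll.dll"', 0)
    except ValueError:
        pass  # yielded as a String feature instead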
def extract_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:
for handler in CALL_HANDLERS:
for feature, addr in handler(ph, th, ch):
yield feature, addr
CALL_HANDLERS = (extract_call_features,)

View File

@@ -1,96 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Dict, List, Tuple, Union, Iterator
import capa.features.extractors.drakvuf.call
import capa.features.extractors.drakvuf.file
import capa.features.extractors.drakvuf.thread
import capa.features.extractors.drakvuf.global_
import capa.features.extractors.drakvuf.process
from capa.features.common import Feature, Characteristic
from capa.features.address import NO_ADDRESS, Address, ThreadAddress, ProcessAddress, AbsoluteVirtualAddress, _NoAddress
from capa.features.extractors.base_extractor import (
CallHandle,
SampleHashes,
ThreadHandle,
ProcessHandle,
DynamicFeatureExtractor,
)
from capa.features.extractors.drakvuf.models import Call, DrakvufReport
from capa.features.extractors.drakvuf.helpers import index_calls
logger = logging.getLogger(__name__)
class DrakvufExtractor(DynamicFeatureExtractor):
def __init__(self, report: DrakvufReport):
super().__init__(
# DRAKVUF currently does not yield hash information about the sample in its output
hashes=SampleHashes(md5="", sha1="", sha256="")
)
self.report: DrakvufReport = report
# sort the api calls to prevent going through the entire list each time
self.sorted_calls: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]] = index_calls(report)
# pre-compute these because we'll yield them at *every* scope.
self.global_features = list(capa.features.extractors.drakvuf.global_.extract_features(self.report))
def get_base_address(self) -> Union[AbsoluteVirtualAddress, _NoAddress, None]:
# DRAKVUF currently does not yield information about the PE's address
return NO_ADDRESS
def extract_global_features(self) -> Iterator[Tuple[Feature, Address]]:
yield from self.global_features
def extract_file_features(self) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.drakvuf.file.extract_features(self.report)
def get_processes(self) -> Iterator[ProcessHandle]:
yield from capa.features.extractors.drakvuf.file.get_processes(self.sorted_calls)
def extract_process_features(self, ph: ProcessHandle) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.drakvuf.process.extract_features(ph)
def get_process_name(self, ph: ProcessHandle) -> str:
return ph.inner["process_name"]
def get_threads(self, ph: ProcessHandle) -> Iterator[ThreadHandle]:
yield from capa.features.extractors.drakvuf.process.get_threads(self.sorted_calls, ph)
def extract_thread_features(self, ph: ProcessHandle, th: ThreadHandle) -> Iterator[Tuple[Feature, Address]]:
if False:
# force this routine to be a generator,
# but we don't actually have any elements to generate.
yield Characteristic("never"), NO_ADDRESS
return
def get_calls(self, ph: ProcessHandle, th: ThreadHandle) -> Iterator[CallHandle]:
yield from capa.features.extractors.drakvuf.thread.get_calls(self.sorted_calls, ph, th)
def get_call_name(self, ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> str:
call: Call = ch.inner
call_name = "{}({}){}".format(
call.name,
", ".join(f"{arg_name}={arg_value}" for arg_name, arg_value in call.arguments.items()),
(f" -> {getattr(call, 'return_value', '')}"), # SysCalls don't have a return value, while WinApi calls do
)
return call_name
def extract_call_features(
self, ph: ProcessHandle, th: ThreadHandle, ch: CallHandle
) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.drakvuf.call.extract_features(ph, th, ch)
@classmethod
def from_report(cls, report: Iterator[Dict]) -> "DrakvufExtractor":
dr = DrakvufReport.from_raw_report(report)
return DrakvufExtractor(report=dr)

View File

@@ -1,56 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Dict, List, Tuple, Iterator
from capa.features.file import Import
from capa.features.common import Feature
from capa.features.address import Address, ThreadAddress, ProcessAddress, AbsoluteVirtualAddress
from capa.features.extractors.helpers import generate_symbols
from capa.features.extractors.base_extractor import ProcessHandle
from capa.features.extractors.drakvuf.models import Call, DrakvufReport
logger = logging.getLogger(__name__)
def get_processes(calls: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]]) -> Iterator[ProcessHandle]:
"""
Get all the created processes for a sample.
"""
for proc_addr, calls_per_thread in calls.items():
sample_call = next(iter(calls_per_thread.values()))[0] # get process name
yield ProcessHandle(proc_addr, inner={"process_name": sample_call.process_name})
def extract_import_names(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
"""
Extract imported function names.
"""
if report.loaded_dlls is None:
return
dlls = report.loaded_dlls
for dll in dlls:
dll_base_name = dll.name.split("\\")[-1]
for function_name, function_address in dll.imports.items():
for name in generate_symbols(dll_base_name, function_name, include_dll=True):
yield Import(name), AbsoluteVirtualAddress(function_address)
def extract_features(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
for handler in FILE_HANDLERS:
for feature, addr in handler(report):
yield feature, addr
FILE_HANDLERS = (
# TODO(yelhamer): extract more file features from other DRAKVUF plugins
# https://github.com/mandiant/capa/issues/2169
extract_import_names,
)

View File

@@ -1,44 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Tuple, Iterator
from capa.features.common import OS, FORMAT_PE, ARCH_AMD64, OS_WINDOWS, Arch, Format, Feature
from capa.features.address import NO_ADDRESS, Address
from capa.features.extractors.drakvuf.models import DrakvufReport
logger = logging.getLogger(__name__)
def extract_format(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
# DRAKVUF sandbox currently supports only Windows as the guest: https://drakvuf-sandbox.readthedocs.io/en/latest/usage/getting_started.html
yield Format(FORMAT_PE), NO_ADDRESS
def extract_os(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
# DRAKVUF sandbox currently supports only PE files: https://drakvuf-sandbox.readthedocs.io/en/latest/usage/getting_started.html
yield OS(OS_WINDOWS), NO_ADDRESS
def extract_arch(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
# DRAKVUF sandbox currently supports only x64 Windows as the guest: https://drakvuf-sandbox.readthedocs.io/en/latest/usage/getting_started.html
yield Arch(ARCH_AMD64), NO_ADDRESS
def extract_features(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
for global_handler in GLOBAL_HANDLER:
for feature, addr in global_handler(report):
yield feature, addr
GLOBAL_HANDLER = (
extract_format,
extract_os,
extract_arch,
)

View File

@@ -1,39 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import itertools
from typing import Dict, List
from capa.features.address import ThreadAddress, ProcessAddress
from capa.features.extractors.drakvuf.models import Call, DrakvufReport
def index_calls(report: DrakvufReport) -> Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]]:
# this method organizes calls into processes and threads, and then sorts them by
# timestamp so that individual calls can be addressed by index (CallAddress requires a call index)
result: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]] = {}
for call in itertools.chain(report.syscalls, report.apicalls):
if call.pid == 0:
# DRAKVUF captures api/native calls from all processes running on the system.
# we ignore pid 0 since it's a system process that is unlikely to be hijacked,
# and capa addresses do not support null pids
continue
proc_addr = ProcessAddress(pid=call.pid, ppid=call.ppid)
thread_addr = ThreadAddress(process=proc_addr, tid=call.tid)
if proc_addr not in result:
result[proc_addr] = {}
if thread_addr not in result[proc_addr]:
result[proc_addr][thread_addr] = []
result[proc_addr][thread_addr].append(call)
for proc, threads in result.items():
for thread in threads:
result[proc][thread].sort(key=lambda call: call.timestamp)
return result
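A hypothetical usage of the resulting index:

    calls_by_thread = index_calls(report)
    for proc_addr, threads in calls_by_thread.items():
        for thread_addr, calls in threads.items():
            earliest = calls[0]  # calls are now sorted by timestamp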

View File

@@ -1,137 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Any, Dict, List, Iterator
from pydantic import Field, BaseModel, ConfigDict, model_validator
logger = logging.getLogger(__name__)
REQUIRED_SYSCALL_FIELD_NAMES = {
"Plugin",
"TimeStamp",
"PID",
"PPID",
"TID",
"UserName",
"UserId",
"ProcessName",
"Method",
"EventUID",
"Module",
"vCPU",
"CR3",
"Syscall",
"NArgs",
}
class ConciseModel(BaseModel):
ConfigDict(extra="ignore")
class DiscoveredDLL(ConciseModel):
plugin_name: str = Field(alias="Plugin")
event: str = Field(alias="Event")
name: str = Field(alias="DllName")
pid: int = Field(alias="PID")
class LoadedDLL(ConciseModel):
plugin_name: str = Field(alias="Plugin")
event: str = Field(alias="Event")
name: str = Field(alias="DllName")
imports: Dict[str, int] = Field(alias="Rva")
class Call(ConciseModel):
plugin_name: str = Field(alias="Plugin")
timestamp: str = Field(alias="TimeStamp")
process_name: str = Field(alias="ProcessName")
ppid: int = Field(alias="PPID")
pid: int = Field(alias="PID")
tid: int = Field(alias="TID")
name: str = Field(alias="Method")
arguments: Dict[str, str]
class WinApiCall(Call):
# This class models Windows API calls captured by DRAKVUF (DLLs, etc.).
arguments: Dict[str, str] = Field(alias="Arguments")
event: str = Field(alias="Event")
return_value: str = Field(alias="ReturnValue")
@model_validator(mode="before")
@classmethod
def build_arguments(cls, values: Dict[str, Any]) -> Dict[str, Any]:
args = values["Arguments"]
values["Arguments"] = dict(arg.split("=", 1) for arg in args)
return values
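A worked example of this validator's transformation, using hypothetical apimon arguments:

    args = ["FileHandle=0x84", "DesiredAccess=0x100020"]
    assert dict(arg.split("=", 1) for arg in args) == {
        "FileHandle": "0x84",
        "DesiredAccess": "0x100020",
    }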
class SystemCall(Call):
# This class models native Windows API calls captured by DRAKVUF.
# Schema: {
# "Plugin": "syscall",
# "TimeStamp": "1716999134.582553",
# "PID": 3888, "PPID": 2852, "TID": 368, "UserName": "SessionID", "UserId": 2,
# "ProcessName": "\\Device\\HarddiskVolume2\\Windows\\explorer.exe",
# "Method": "NtSetIoCompletionEx",
# "EventUID": "0x27",
# "Module": "nt",
# "vCPU": 0,
# "CR3": "0x119b1002",
# "Syscall": 419,
# "NArgs": 6,
# "IoCompletionHandle": "0xffffffff80001ac0", "IoCompletionReserveHandle": "0xffffffff8000188c",
# "KeyContext": "0x0", "ApcContext": "0x2", "IoStatus": "0x7ffb00000000", "IoStatusInformation": "0x0"
# }
# The keys up until "NArgs" are common to all the native calls that DRAKVUF reports, with
# the remaining keys representing the call's specific arguments.
syscall_number: int = Field(alias="Syscall")
module: str = Field(alias="Module")
nargs: int = Field(alias="NArgs")
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
# DRAKVUF stores argument names and values as top-level keys in the syscall's record.
# This model validator collects those arguments into the model's `arguments` dict.
values["arguments"] = {
name: value for name, value in values.items() if name not in REQUIRED_SYSCALL_FIELD_NAMES
}
return values
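Applied to the (abridged) sample record documented above, the validator keeps only the trailing argument keys:

    record = {"Plugin": "syscall", "Method": "NtSetIoCompletionEx", "NArgs": 2,
              "KeyContext": "0x0", "ApcContext": "0x2"}
    extra = {k: v for k, v in record.items() if k not in REQUIRED_SYSCALL_FIELD_NAMES}
    assert extra == {"KeyContext": "0x0", "ApcContext": "0x2"}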
class DrakvufReport(ConciseModel):
syscalls: List[SystemCall] = []
apicalls: List[WinApiCall] = []
discovered_dlls: List[DiscoveredDLL] = []
loaded_dlls: List[LoadedDLL] = []
@classmethod
def from_raw_report(cls, entries: Iterator[Dict]) -> "DrakvufReport":
report = cls()
for entry in entries:
plugin = entry.get("Plugin")
# TODO(yelhamer): add support for more DRAKVUF plugins
# https://github.com/mandiant/capa/issues/2181
if plugin == "syscall":
report.syscalls.append(SystemCall(**entry))
elif plugin == "apimon":
event = entry.get("Event")
if event == "api_called":
report.apicalls.append(WinApiCall(**entry))
elif event == "dll_loaded":
report.loaded_dlls.append(LoadedDLL(**entry))
elif event == "dll_discovered":
report.discovered_dlls.append(DiscoveredDLL(**entry))
return report
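A hypothetical usage with a line-delimited DRAKVUF log (the path is illustrative):

    import json
    from pathlib import Path

    lines = Path("drakvuf.log").read_text().splitlines()
    report = DrakvufReport.from_raw_report(json.loads(line) for line in lines)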

View File

@@ -1,40 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Dict, List, Tuple, Iterator
from capa.features.common import String, Feature
from capa.features.address import Address, ThreadAddress, ProcessAddress
from capa.features.extractors.base_extractor import ThreadHandle, ProcessHandle
from capa.features.extractors.drakvuf.models import Call
logger = logging.getLogger(__name__)
def get_threads(
calls: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]], ph: ProcessHandle
) -> Iterator[ThreadHandle]:
"""
Get the threads associated with a given process.
"""
for thread_addr in calls[ph.address]:
yield ThreadHandle(address=thread_addr, inner={})
def extract_process_name(ph: ProcessHandle) -> Iterator[Tuple[Feature, Address]]:
yield String(ph.inner["process_name"]), ph.address
def extract_features(ph: ProcessHandle) -> Iterator[Tuple[Feature, Address]]:
for handler in PROCESS_HANDLERS:
for feature, addr in handler(ph):
yield feature, addr
PROCESS_HANDLERS = (extract_process_name,)

View File

@@ -1,24 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Dict, List, Iterator
from capa.features.address import ThreadAddress, ProcessAddress, DynamicCallAddress
from capa.features.extractors.base_extractor import CallHandle, ThreadHandle, ProcessHandle
from capa.features.extractors.drakvuf.models import Call
logger = logging.getLogger(__name__)
def get_calls(
sorted_calls: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]], ph: ProcessHandle, th: ThreadHandle
) -> Iterator[CallHandle]:
for i, call in enumerate(sorted_calls[ph.address][th.address]):
call_addr = DynamicCallAddress(thread=th.address, id=i)
yield CallHandle(address=call_addr, inner=call)

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -10,11 +10,10 @@ import logging
import itertools
import collections
from enum import Enum
from typing import TYPE_CHECKING, Set, Dict, List, Tuple, BinaryIO, Iterator, Optional
from typing import Set, Dict, List, Tuple, BinaryIO, Iterator, Optional
from dataclasses import dataclass
if TYPE_CHECKING:
import Elf # from vivisect
import Elf # from vivisect
logger = logging.getLogger(__name__)
@@ -58,10 +57,6 @@ class OS(str, Enum):
SYLLABLE = "syllable"
NACL = "nacl"
ANDROID = "android"
DRAGONFLYBSD = "dragonfly BSD"
ILLUMOS = "illumos"
ZOS = "z/os"
UNIX = "unix"
# via readelf: https://github.com/bminor/binutils-gdb/blob/c0e94211e1ac05049a4ce7c192c9d14d1764eb3e/binutils/readelf.c#L19635-L19658
@@ -85,8 +80,6 @@ class Phdr:
paddr: int
filesz: int
buf: bytes
flags: int
memsz: int
@dataclass
@@ -212,7 +205,7 @@ class ELF:
15: OS.AROS,
16: OS.FENIXOS,
17: OS.CLOUD,
# 53: "SORTFIX", # i can't find any reference to this OS, i don't think it exists
# 53: "SORTFIX", # i can't find any reference to this OS, i dont think it exists
# 64: "ARM_AEABI", # not an OS
# 97: "ARM", # not an OS
# 255: "STANDALONE", # not an OS
@@ -310,9 +303,6 @@ class ELF:
98: "TPC",
99: "SNP1K",
100: "ST200",
# https://www.sco.com/developers/gabi/latest/ch4.eheader.html
183: "aarch64",
243: "riscv",
}
@property
@@ -324,23 +314,24 @@ class ELF:
phent_offset = i * self.e_phentsize
phent = self.phbuf[phent_offset : phent_offset + self.e_phentsize]
(p_type,) = struct.unpack_from(self.endian + "I", phent, 0x0)
logger.debug("ph:p_type: 0x%04x", p_type)
if self.bitness == 32:
p_type, p_offset, p_vaddr, p_paddr, p_filesz, p_memsz, p_flags = struct.unpack_from(
self.endian + "IIIIIII", phent, 0x0
)
p_offset, p_vaddr, p_paddr, p_filesz = struct.unpack_from(self.endian + "IIII", phent, 0x4)
elif self.bitness == 64:
p_type, p_flags, p_offset, p_vaddr, p_paddr, p_filesz, p_memsz = struct.unpack_from(
self.endian + "IIQQQQQ", phent, 0x0
)
p_offset, p_vaddr, p_paddr, p_filesz = struct.unpack_from(self.endian + "QQQQ", phent, 0x8)
else:
raise NotImplementedError()
logger.debug("ph:p_offset: 0x%02x p_filesz: 0x%04x", p_offset, p_filesz)
self.f.seek(p_offset)
buf = self.f.read(p_filesz)
if len(buf) != p_filesz:
raise ValueError("failed to read program header content")
return Phdr(p_type, p_offset, p_vaddr, p_paddr, p_filesz, buf, p_flags, p_memsz)
return Phdr(p_type, p_offset, p_vaddr, p_paddr, p_filesz, buf)
@property
def program_headers(self):
@@ -365,6 +356,8 @@ class ELF:
else:
raise NotImplementedError()
logger.debug("sh:sh_offset: 0x%02x sh_size: 0x%04x", sh_offset, sh_size)
self.f.seek(sh_offset)
buf = self.f.read(sh_size)
if len(buf) != sh_size:
@@ -731,7 +724,7 @@ class SymTab:
yield from self.symbols
@classmethod
def from_viv(cls, elf: "Elf.Elf") -> Optional["SymTab"]:
def from_viv(cls, elf: Elf.Elf) -> Optional["SymTab"]:
endian = "<" if elf.getEndian() == 0 else ">"
bitness = elf.bits
@@ -873,10 +866,6 @@ def guess_os_from_ident_directive(elf: ELF) -> Optional[OS]:
return OS.LINUX
elif "Red Hat" in comment:
return OS.LINUX
elif "Alpine" in comment:
return OS.LINUX
elif "Android" in comment:
return OS.ANDROID
return None
@@ -932,8 +921,6 @@ def guess_os_from_needed_dependencies(elf: ELF) -> Optional[OS]:
return OS.HURD
if needed.startswith("libandroid.so"):
return OS.ANDROID
if needed.startswith("liblog.so"):
return OS.ANDROID
return None
@@ -960,506 +947,11 @@ def guess_os_from_symtab(elf: ELF) -> Optional[OS]:
for os, hints in keywords.items():
if any(hint in sym_name for hint in hints):
logger.debug("symtab: %s looks like %s", sym_name, os)
return os
return None
def is_go_binary(elf: ELF) -> bool:
for shdr in elf.section_headers:
if shdr.get_name(elf) == ".note.go.buildid":
logger.debug("go buildinfo: found section .note.go.buildid")
return True
# The `go version` command enumerates sections looking for the name `.go.buildinfo`
# (in addition to looking for the BUILDINFO_MAGIC) to check whether an executable is a Go binary.
# See references to the `errNotGoExe` error here:
# https://github.com/golang/go/blob/master/src/debug/buildinfo/buildinfo.go#L41
for shdr in elf.section_headers:
if shdr.get_name(elf) == ".go.buildinfo":
logger.debug("go buildinfo: found section .go.buildinfo")
return True
# other strategy used by FLOSS: search for known runtime strings.
# https://github.com/mandiant/flare-floss/blob/b2ca8adfc5edf278861dd6bff67d73da39683b46/floss/language/identify.py#L88
return False
def get_go_buildinfo_data(elf: ELF) -> Optional[bytes]:
for shdr in elf.section_headers:
if shdr.get_name(elf) == ".go.buildinfo":
logger.debug("go buildinfo: found section .go.buildinfo")
return shdr.buf
PT_LOAD = 0x1
PF_X = 1
PF_W = 2
for phdr in elf.program_headers:
if phdr.type != PT_LOAD:
continue
if (phdr.flags & (PF_X | PF_W)) == PF_W:
logger.debug("go buildinfo: found data segment")
return phdr.buf
return None
def read_data(elf: ELF, rva: int, size: int) -> Optional[bytes]:
# ELF segments are for runtime data,
# ELF sections are for link-time data.
# So we want to read Program Headers/Segments.
for phdr in elf.program_headers:
if phdr.vaddr <= rva < phdr.vaddr + phdr.memsz:
segment_data = phdr.buf
# pad the segment with NULLs
# assume page alignment is already handled.
# might need more hardening here.
if len(segment_data) < phdr.memsz:
segment_data += b"\x00" * (phdr.memsz - len(segment_data))
segment_offset = rva - phdr.vaddr
return segment_data[segment_offset : segment_offset + size]
return None
def read_go_slice(elf: ELF, rva: int) -> Optional[bytes]:
if elf.bitness == 32:
struct_size = 8
struct_format = elf.endian + "II"
elif elf.bitness == 64:
struct_size = 16
struct_format = elf.endian + "QQ"
else:
raise ValueError("invalid psize")
struct_buf = read_data(elf, rva, struct_size)
if not struct_buf:
return None
addr, length = struct.unpack_from(struct_format, struct_buf, 0)
return read_data(elf, addr, length)
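A worked example of the slice header this reads, assuming a 64-bit little-endian binary (addresses are hypothetical):

    import struct

    header = struct.pack("<QQ", 0x5F0000, 5)  # (data address, length)
    addr, length = struct.unpack_from("<QQ", header, 0)
    assert (addr, length) == (0x5F0000, 5)
    # read_go_slice would then read 5 bytes at virtual address 0x5F0000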
def guess_os_from_go_buildinfo(elf: ELF) -> Optional[OS]:
"""
In a binary compiled by Go, the buildinfo structure may contain
metadata about the build environment, including the configured
GOOS, which specifies the target operating system.
Search for and parse the buildinfo structure,
which may be found in the .go.buildinfo section,
and often contains this metadata inline. Otherwise,
follow a few byte slices to the relevant information.
This strategy is derived from GoReSym.
"""
buf = get_go_buildinfo_data(elf)
if not buf:
logger.debug("go buildinfo: no buildinfo section")
return None
assert isinstance(buf, bytes)
# The build info blob left by the linker is identified by
# a 16-byte header, consisting of:
# - buildInfoMagic (14 bytes),
# - the binary's pointer size (1 byte), and
# - whether the binary is big endian (1 byte).
#
# Then:
# - virtual address to Go string: runtime.buildVersion
# - virtual address to Go string: runtime.modinfo
#
# On 32-bit platforms, the last 8 bytes are unused.
#
# If the endianness has the 2 bit set, then the pointers are zero,
# and the 32-byte header is followed by varint-prefixed string data
# for the two string values we care about.
# https://github.com/mandiant/GoReSym/blob/0860a1b1b4f3495e9fb7e71eb4386bf3e0a7c500/buildinfo/buildinfo.go#L185-L193
BUILDINFO_MAGIC = b"\xFF Go buildinf:"
try:
index = buf.index(BUILDINFO_MAGIC)
except ValueError:
logger.debug("go buildinfo: no buildinfo magic")
return None
psize, flags = struct.unpack_from("<bb", buf, index + len(BUILDINFO_MAGIC))
assert psize in (4, 8)
is_big_endian = flags & 0b01
has_inline_strings = flags & 0b10
logger.debug("go buildinfo: psize: %d big endian: %s inline: %s", psize, is_big_endian, has_inline_strings)
GOOS_TO_OS = {
b"aix": OS.AIX,
b"android": OS.ANDROID,
b"dragonfly": OS.DRAGONFLYBSD,
b"freebsd": OS.FREEBSD,
b"hurd": OS.HURD,
b"illumos": OS.ILLUMOS,
b"linux": OS.LINUX,
b"netbsd": OS.NETBSD,
b"openbsd": OS.OPENBSD,
b"solaris": OS.SOLARIS,
b"zos": OS.ZOS,
b"windows": None, # PE format
b"plan9": None, # a.out format
b"ios": None, # Mach-O format
b"darwin": None, # Mach-O format
b"nacl": None, # dropped in GO 1.14
b"js": None,
}
if has_inline_strings:
# This is the common case/path. Most samples will have an inline GOOS string.
#
# To find samples on VT, use these VTGrep searches:
#
# content: {ff 20 47 6f 20 62 75 69 6c 64 69 6e 66 3a 04 02}
# content: {ff 20 47 6f 20 62 75 69 6c 64 69 6e 66 3a 08 02}
# If present, the GOOS key will be found within
# the current buildinfo data region.
#
# Brute force the k-v pair, like `GOOS=linux`,
# rather than try to parse the data, which would be fragile.
for key, os in GOOS_TO_OS.items():
if (b"GOOS=" + key) in buf:
logger.debug("go buildinfo: found os: %s", os)
return os
else:
# This is the uncommon path. Most samples will have an inline GOOS string.
#
# To find samples on VT, use the referenced VTGrep content searches.
info_format = {
# content: {ff 20 47 6f 20 62 75 69 6c 64 69 6e 66 3a 04 00}
# like: 71e617e5cc7fda89bf67422ff60f437e9d54622382c5ed6ff31f75e601f9b22e
# in which the modinfo doesn't have GOOS.
(4, False): "<II",
# content: {ff 20 47 6f 20 62 75 69 6c 64 69 6e 66 3a 08 00}
# like: 93d3b3e2a904c6c909e20f2f76c3c2e8d0c81d535eb46e5493b5701f461816c3
# in which the modinfo doesn't have GOOS.
(8, False): "<QQ",
# content: {ff 20 47 6f 20 62 75 69 6c 64 69 6e 66 3a 04 01}
# (no matches on VT today)
(4, True): ">II",
# content: {ff 20 47 6f 20 62 75 69 6c 64 69 6e 66 3a 08 01}
# like: d44ba497964050c0e3dd2a192c511e4c3c4f17717f0322a554d64b797ee4690a
# in which the modinfo doesn't have GOOS.
(8, True): ">QQ",
}
build_version_address, modinfo_address = struct.unpack_from(
info_format[(psize, is_big_endian)], buf, index + 0x10
)
logger.debug("go buildinfo: build version address: 0x%x", build_version_address)
logger.debug("go buildinfo: modinfo address: 0x%x", modinfo_address)
build_version = read_go_slice(elf, build_version_address)
if build_version:
logger.debug("go buildinfo: build version: %s", build_version.decode("utf-8"))
modinfo = read_go_slice(elf, modinfo_address)
if modinfo:
if modinfo[-0x11] == ord("\n"):
# Strip module framing: sentinel strings delimiting the module info.
# These are cmd/go/internal/modload/build.infoStart and infoEnd,
# which are defined as:
# infoStart, _ = hex.DecodeString("3077af0c9274080241e1c107e6d618e6")
# infoEnd, _ = hex.DecodeString("f932433186182072008242104116d8f2")
modinfo = modinfo[0x10:-0x10]
logger.debug("go buildinfo: modinfo: %s", modinfo.decode("utf-8"))
if not modinfo:
return None
for key, os in GOOS_TO_OS.items():
# Brute force the k-v pair, like `GOOS=linux`,
# rather than try to parse the data, which would be fragile.
if (b"GOOS=" + key) in modinfo:
logger.debug("go buildinfo: found os: %s", os)
return os
return None
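As a self-contained sketch of the 16-byte header layout described above (14-byte magic, pointer size, flags), assuming only the raw bytes of the buildinfo region:

import struct
from typing import Optional, Tuple

BUILDINFO_MAGIC = b"\xff Go buildinf:"

def parse_buildinfo_header(buf: bytes) -> Optional[Tuple[int, bool, bool]]:
    index = buf.find(BUILDINFO_MAGIC)
    if index == -1:
        return None
    psize, flags = struct.unpack_from("<BB", buf, index + len(BUILDINFO_MAGIC))
    # (pointer size, big endian?, inline varint-prefixed strings?)
    return psize, bool(flags & 0b01), bool(flags & 0b10)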
def guess_os_from_go_source(elf: ELF) -> Optional[OS]:
"""
In a binary compiled by Go, runtime metadata may contain
references to the source filenames, including the
src/runtime/os_* files, whose name indicates the
target operating system.
Confirm the given ELF seems to be built by Go,
and then look for strings that look like
Go source filenames.
This strategy is derived from GoReSym.
"""
if not is_go_binary(elf):
return None
for phdr in elf.program_headers:
buf = phdr.buf
NEEDLE_OS = b"/src/runtime/os_"
try:
index = buf.index(NEEDLE_OS)
except ValueError:
continue
rest = buf[index + len(NEEDLE_OS) : index + len(NEEDLE_OS) + 32]
filename = rest.partition(b".go")[0].decode("utf-8")
logger.debug("go source: filename: /src/runtime/os_%s.go", filename)
# via: https://cs.opensource.google/go/go/+/master:src/runtime/;bpv=1;bpt=0
# candidates today:
# - aix
# - android
# - darwin
# - darwin_arm64
# - dragonfly
# - freebsd
# - freebsd2
# - freebsd_amd64
# - freebsd_arm
# - freebsd_arm64
# - freebsd_noauxv
# - freebsd_riscv64
# - illumos
# - js
# - linux
# - linux_arm
# - linux_arm64
# - linux_be64
# - linux_generic
# - linux_loong64
# - linux_mips64x
# - linux_mipsx
# - linux_noauxv
# - linux_novdso
# - linux_ppc64x
# - linux_riscv64
# - linux_s390x
# - linux_x86
# - netbsd
# - netbsd_386
# - netbsd_amd64
# - netbsd_arm
# - netbsd_arm64
# - nonopenbsd
# - only_solaris
# - openbsd
# - openbsd_arm
# - openbsd_arm64
# - openbsd_libc
# - openbsd_mips64
# - openbsd_syscall
# - openbsd_syscall1
# - openbsd_syscall2
# - plan9
# - plan9_arm
# - solaris
# - unix
# - unix_nonlinux
# - wasip1
# - wasm
# - windows
# - windows_arm
# - windows_arm64
OS_FILENAME_TO_OS = {
"aix": OS.AIX,
"android": OS.ANDROID,
"dragonfly": OS.DRAGONFLYBSD,
"freebsd": OS.FREEBSD,
"freebsd2": OS.FREEBSD,
"freebsd_": OS.FREEBSD,
"illumos": OS.ILLUMOS,
"linux": OS.LINUX,
"netbsd": OS.NETBSD,
"only_solaris": OS.SOLARIS,
"openbsd": OS.OPENBSD,
"solaris": OS.SOLARIS,
"unix_nonlinux": OS.UNIX,
}
for prefix, os in OS_FILENAME_TO_OS.items():
if filename.startswith(prefix):
return os
for phdr in elf.program_headers:
buf = phdr.buf
NEEDLE_RT0 = b"/src/runtime/rt0_"
try:
index = buf.index(NEEDLE_RT0)
except ValueError:
continue
rest = buf[index + len(NEEDLE_RT0) : index + len(NEEDLE_RT0) + 32]
filename = rest.partition(b".s")[0].decode("utf-8")
logger.debug("go source: filename: /src/runtime/rt0_%s.s", filename)
# via: https://cs.opensource.google/go/go/+/master:src/runtime/;bpv=1;bpt=0
# candidates today:
# - aix_ppc64
# - android_386
# - android_amd64
# - android_arm
# - android_arm64
# - darwin_amd64
# - darwin_arm64
# - dragonfly_amd64
# - freebsd_386
# - freebsd_amd64
# - freebsd_arm
# - freebsd_arm64
# - freebsd_riscv64
# - illumos_amd64
# - ios_amd64
# - ios_arm64
# - js_wasm
# - linux_386
# - linux_amd64
# - linux_arm
# - linux_arm64
# - linux_loong64
# - linux_mips64x
# - linux_mipsx
# - linux_ppc64
# - linux_ppc64le
# - linux_riscv64
# - linux_s390x
# - netbsd_386
# - netbsd_amd64
# - netbsd_arm
# - netbsd_arm64
# - openbsd_386
# - openbsd_amd64
# - openbsd_arm
# - openbsd_arm64
# - openbsd_mips64
# - openbsd_ppc64
# - openbsd_riscv64
# - plan9_386
# - plan9_amd64
# - plan9_arm
# - solaris_amd64
# - wasip1_wasm
# - windows_386
# - windows_amd64
# - windows_arm
# - windows_arm64
RT0_FILENAME_TO_OS = {
"aix": OS.AIX,
"android": OS.ANDROID,
"dragonfly": OS.DRAGONFLYBSD,
"freebsd": OS.FREEBSD,
"illumos": OS.ILLUMOS,
"linux": OS.LINUX,
"netbsd": OS.NETBSD,
"openbsd": OS.OPENBSD,
"solaris": OS.SOLARIS,
}
for prefix, os in RT0_FILENAME_TO_OS.items():
if filename.startswith(prefix):
return os
return None
def guess_os_from_vdso_strings(elf: ELF) -> Optional[OS]:
"""
The "vDSO" (virtual dynamic shared object) is a small shared
library that the kernel automatically maps into the address space
of all user-space applications.
Some statically linked executables include small dynamic linker
routines that find these vDSO symbols, using the ASCII
symbol name and version. We can therefore recognize the pairs
(symbol, version) to guess that the binary targets Linux.
"""
for phdr in elf.program_headers:
buf = phdr.buf
# We don't really use the arch, but it's interesting for documentation.
# I suppose we could restrict the arch here to what's in the ELF header,
# but that's even more work. Let's see if this is sufficient.
for arch, symbol, version in (
# via: https://man7.org/linux/man-pages/man7/vdso.7.html
("arm", b"__vdso_gettimeofday", b"LINUX_2.6"),
("arm", b"__vdso_clock_gettime", b"LINUX_2.6"),
("aarch64", b"__kernel_rt_sigreturn", b"LINUX_2.6.39"),
("aarch64", b"__kernel_gettimeofday", b"LINUX_2.6.39"),
("aarch64", b"__kernel_clock_gettime", b"LINUX_2.6.39"),
("aarch64", b"__kernel_clock_getres", b"LINUX_2.6.39"),
("mips", b"__kernel_gettimeofday", b"LINUX_2.6"),
("mips", b"__kernel_clock_gettime", b"LINUX_2.6"),
("ia64", b"__kernel_sigtramp", b"LINUX_2.5"),
("ia64", b"__kernel_syscall_via_break", b"LINUX_2.5"),
("ia64", b"__kernel_syscall_via_epc", b"LINUX_2.5"),
("ppc/32", b"__kernel_clock_getres", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_clock_gettime", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_clock_gettime64", b"LINUX_5.11"),
("ppc/32", b"__kernel_datapage_offset", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_get_syscall_map", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_get_tbfreq", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_getcpu", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_gettimeofday", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_sigtramp_rt32", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_sigtramp32", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_sync_dicache", b"LINUX_2.6.15"),
("ppc/32", b"__kernel_sync_dicache_p5", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_clock_getres", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_clock_gettime", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_datapage_offset", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_get_syscall_map", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_get_tbfreq", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_getcpu", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_gettimeofday", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_sigtramp_rt64", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_sync_dicache", b"LINUX_2.6.15"),
("ppc/64", b"__kernel_sync_dicache_p5", b"LINUX_2.6.15"),
("riscv", b"__vdso_rt_sigreturn", b"LINUX_4.15"),
("riscv", b"__vdso_gettimeofday", b"LINUX_4.15"),
("riscv", b"__vdso_clock_gettime", b"LINUX_4.15"),
("riscv", b"__vdso_clock_getres", b"LINUX_4.15"),
("riscv", b"__vdso_getcpu", b"LINUX_4.15"),
("riscv", b"__vdso_flush_icache", b"LINUX_4.15"),
("s390", b"__kernel_clock_getres", b"LINUX_2.6.29"),
("s390", b"__kernel_clock_gettime", b"LINUX_2.6.29"),
("s390", b"__kernel_gettimeofday", b"LINUX_2.6.29"),
("superh", b"__kernel_rt_sigreturn", b"LINUX_2.6"),
("superh", b"__kernel_sigreturn", b"LINUX_2.6"),
("superh", b"__kernel_vsyscall", b"LINUX_2.6"),
("i386", b"__kernel_sigreturn", b"LINUX_2.5"),
("i386", b"__kernel_rt_sigreturn", b"LINUX_2.5"),
("i386", b"__kernel_vsyscall", b"LINUX_2.5"),
("i386", b"__vdso_clock_gettime", b"LINUX_2.6"),
("i386", b"__vdso_gettimeofday", b"LINUX_2.6"),
("i386", b"__vdso_time", b"LINUX_2.6"),
("x86-64", b"__vdso_clock_gettime", b"LINUX_2.6"),
("x86-64", b"__vdso_getcpu", b"LINUX_2.6"),
("x86-64", b"__vdso_gettimeofday", b"LINUX_2.6"),
("x86-64", b"__vdso_time", b"LINUX_2.6"),
("x86/32", b"__vdso_clock_gettime", b"LINUX_2.6"),
("x86/32", b"__vdso_getcpu", b"LINUX_2.6"),
("x86/32", b"__vdso_gettimeofday", b"LINUX_2.6"),
("x86/32", b"__vdso_time", b"LINUX_2.6"),
):
if symbol in buf and version in buf:
logger.debug("vdso string: %s %s %s", arch, symbol.decode("ascii"), version.decode("ascii"))
return OS.LINUX
return None
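Since the heuristic reduces to substring scanning, it can be approximated over a raw file image; a hedged sketch with a hypothetical input path:

data = open("sample.elf", "rb").read()
if b"__vdso_clock_gettime" in data and b"LINUX_2.6" in data:
    print("likely targets Linux (vDSO stub strings present)")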
def detect_elf_os(f) -> str:
"""
f: type Union[BinaryIO, IDAIO, GHIDRAIO]
@@ -1526,32 +1018,15 @@ def detect_elf_os(f) -> str:
logger.warning("Error guessing OS from symbol table: %s", e)
symtab_guess = None
try:
goos_guess = guess_os_from_go_buildinfo(elf)
logger.debug("guess: Go buildinfo: %s", goos_guess)
except Exception as e:
logger.warning("Error guessing OS from Go buildinfo: %s", e)
goos_guess = None
try:
gosrc_guess = guess_os_from_go_source(elf)
logger.debug("guess: Go source: %s", gosrc_guess)
except Exception as e:
logger.warning("Error guessing OS from Go source path: %s", e)
gosrc_guess = None
try:
vdso_guess = guess_os_from_vdso_strings(elf)
logger.debug("guess: vdso strings: %s", vdso_guess)
except Exception as e:
logger.warning("Error guessing OS from vdso strings: %s", e)
vdso_guess = None
ret = None
if osabi_guess:
ret = osabi_guess
elif ident_guess:
# we don't trust this too much due to non-cross-compilation assumptions
ret = ident_guess
elif ph_notes_guess:
ret = ph_notes_guess
@@ -1570,24 +1045,6 @@ def detect_elf_os(f) -> str:
elif symtab_guess:
ret = symtab_guess
elif goos_guess:
ret = goos_guess
elif gosrc_guess:
# prefer goos_guess to this method,
# which is just string interpretation.
ret = gosrc_guess
elif ident_guess:
# at the bottom because we don't trust this too much
# due to potential for bugs with cross-compilation.
ret = ident_guess
elif vdso_guess:
# at the bottom because this is just scanning strings,
# which isn't very authoritative.
ret = vdso_guess
return ret.value if ret is not None else "unknown"
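Net effect: the first non-None guess in the precedence chain wins. A condensed sketch using the local guess variables, following the newer ordering in this hunk (the full chain includes guesses elided here):

ret = next((g for g in (osabi_guess, ph_notes_guess, symtab_guess, goos_guess,
                        gosrc_guess, ident_guess, vdso_guess) if g is not None), None)
# then: ret.value if ret is not None else "unknown"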

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -10,7 +10,8 @@ import logging
from typing import Tuple, Iterator
from pathlib import Path
from elftools.elf.elffile import ELFFile, DynamicSegment, SymbolTableSection
from elftools.elf.elffile import ELFFile, SymbolTableSection
from elftools.elf.relocation import RelocationSection
import capa.features.extractors.common
from capa.features.file import Export, Import, Section
@@ -46,47 +47,17 @@ def extract_file_export_names(elf: ELFFile, **kwargs):
yield Export(symbol.name), AbsoluteVirtualAddress(symbol.entry.st_value)
for segment in elf.iter_segments():
if not isinstance(segment, DynamicSegment):
continue
tab_ptr, tab_offset = segment.get_table_offset("DT_SYMTAB")
if tab_ptr is None or tab_offset is None:
logger.debug("Dynamic segment doesn't contain DT_SYMTAB")
continue
logger.debug("Dynamic segment contains %s symbols: ", segment.num_symbols())
for symbol in segment.iter_symbols():
# The following conditions are based on the following article
# http://www.m4b.io/elf/export/binary/analysis/2015/05/25/what-is-an-elf-export.html
if not symbol.name:
continue
if symbol.entry.st_info.type not in ["STT_FUNC", "STT_OBJECT", "STT_IFUNC"]:
continue
if symbol.entry.st_value == 0:
continue
if symbol.entry.st_shndx == "SHN_UNDEF":
continue
yield Export(symbol.name), AbsoluteVirtualAddress(symbol.entry.st_value)
def extract_file_import_names(elf: ELFFile, **kwargs):
# Create a dictionary to store symbol names by their index
symbol_names = {}
# Extract symbol names and store them in the dictionary
for segment in elf.iter_segments():
if not isinstance(segment, DynamicSegment):
for section in elf.iter_sections():
if not isinstance(section, SymbolTableSection):
continue
tab_ptr, tab_offset = segment.get_table_offset("DT_SYMTAB")
if tab_ptr is None or tab_offset is None:
logger.debug("Dynamic segment doesn't contain DT_SYMTAB")
continue
for _, symbol in enumerate(segment.iter_symbols()):
for _, symbol in enumerate(section.iter_symbols()):
# The following conditions are based on the following article
# http://www.m4b.io/elf/export/binary/analysis/2015/05/25/what-is-an-elf-export.html
if not symbol.name:
@@ -102,28 +73,21 @@ def extract_file_import_names(elf: ELFFile, **kwargs):
symbol_names[_] = symbol.name
for segment in elf.iter_segments():
if not isinstance(segment, DynamicSegment):
for section in elf.iter_sections():
if not isinstance(section, RelocationSection):
continue
relocation_tables = segment.get_relocation_tables()
logger.debug("Dynamic Segment contains %s relocation tables:", len(relocation_tables))
if section["sh_entsize"] == 0:
logger.debug("Symbol table '%s' has a sh_entsize of zero!", section.name)
continue
for relocation_table in relocation_tables.values():
relocations = []
for i in range(relocation_table.num_relocations()):
try:
relocations.append(relocation_table.get_relocation(i))
except TypeError:
# ELF is corrupt and the relocation table is invalid,
# so stop processing it.
break
logger.debug("Symbol table '%s' contains %s entries:", section.name, section.num_relocations())
for relocation in relocations:
# Extract the symbol name from the symbol table using the symbol index in the relocation
if relocation["r_info_sym"] not in symbol_names:
continue
yield Import(symbol_names[relocation["r_info_sym"]]), FileOffsetAddress(relocation["r_offset"])
for relocation in section.iter_relocations():
# Extract the symbol name from the symbol table using the symbol index in the relocation
if relocation["r_info_sym"] not in symbol_names:
continue
yield Import(symbol_names[relocation["r_info_sym"]]), FileOffsetAddress(relocation["r_offset"])
def extract_file_section_names(elf: ELFFile, **kwargs):
@@ -158,10 +122,6 @@ def extract_file_arch(elf: ELFFile, **kwargs):
yield Arch("i386"), NO_ADDRESS
elif arch == "x64":
yield Arch("amd64"), NO_ADDRESS
elif arch == "ARM":
yield Arch("arm"), NO_ADDRESS
elif arch == "AArch64":
yield Arch("aarch64"), NO_ADDRESS
else:
logger.warning("unsupported architecture: %s", arch)

View File

@@ -34,7 +34,7 @@ class GhidraFeatureExtractor(StaticFeatureExtractor):
# https://ghidra.re/ghidra_docs/api/ghidra/program/model/listing/Program.html
#
# the hashes are stored in the database, not computed on the fly,
# so it's probably not trivial to add SHA1.
# so its probably not trivial to add SHA1.
sha1="",
sha256=capa.ghidra.helpers.get_file_sha256(),
)

View File

@@ -260,7 +260,7 @@ def dereference_ptr(insn: ghidra.program.database.code.InstructionDB):
if thfunc and thfunc.isThunk():
return handle_thunk(to_deref)
else:
# if it doesn't point to a thunk, it's usually a jmp to a label
# if it doesn't poin to a thunk, it's usually a jmp to a label
return to_deref
if not dat:
return to_deref

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -63,7 +63,6 @@ def generate_symbols(dll: str, symbol: str, include_dll=False) -> Iterator[str]:
# trim extensions observed in dynamic traces
dll = dll[0:-4] if dll.endswith(".dll") else dll
dll = dll[0:-4] if dll.endswith(".drv") else dll
dll = dll[0:-3] if dll.endswith(".so") else dll
if include_dll or is_ordinal(symbol):
# ws2_32.#1

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -8,6 +8,7 @@
from typing import List, Tuple, Iterator
import idaapi
import ida_nalt
import capa.ida.helpers
import capa.features.extractors.elf
@@ -31,9 +32,7 @@ class IdaFeatureExtractor(StaticFeatureExtractor):
def __init__(self):
super().__init__(
hashes=SampleHashes(
md5=capa.ida.helpers.retrieve_input_file_md5(),
sha1="(unknown)",
sha256=capa.ida.helpers.retrieve_input_file_sha256(),
md5=ida_nalt.retrieve_input_file_md5(), sha1="(unknown)", sha256=ida_nalt.retrieve_input_file_sha256()
)
)
self.global_features: List[Tuple[Feature, Address]] = []

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -14,7 +14,6 @@ import idaapi
import idautils
import ida_entry
import capa.ida.helpers
import capa.features.extractors.common
import capa.features.extractors.helpers
import capa.features.extractors.strings
@@ -178,17 +177,17 @@ def extract_file_function_names() -> Iterator[Tuple[Feature, Address]]:
def extract_file_format() -> Iterator[Tuple[Feature, Address]]:
filetype = capa.ida.helpers.get_filetype()
file_info = idaapi.get_inf_structure()
if filetype in (idaapi.f_PE, idaapi.f_COFF):
if file_info.filetype in (idaapi.f_PE, idaapi.f_COFF):
yield Format(FORMAT_PE), NO_ADDRESS
elif filetype == idaapi.f_ELF:
elif file_info.filetype == idaapi.f_ELF:
yield Format(FORMAT_ELF), NO_ADDRESS
elif filetype == idaapi.f_BIN:
elif file_info.filetype == idaapi.f_BIN:
# no file type to return when processing a binary file, but we want to continue processing
return
else:
raise NotImplementedError(f"unexpected file format: {filetype}")
raise NotImplementedError(f"unexpected file format: {file_info.filetype}")
def extract_features() -> Iterator[Tuple[Feature, Address]]:

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -9,6 +9,7 @@ import logging
import contextlib
from typing import Tuple, Iterator
import idaapi
import ida_loader
import capa.ida.helpers
@@ -47,12 +48,12 @@ def extract_os() -> Iterator[Tuple[Feature, Address]]:
def extract_arch() -> Iterator[Tuple[Feature, Address]]:
procname = capa.ida.helpers.get_processor_name()
if procname == "metapc" and capa.ida.helpers.is_64bit():
info: idaapi.idainfo = idaapi.get_inf_structure()
if info.procname == "metapc" and info.is_64bit():
yield Arch(ARCH_AMD64), NO_ADDRESS
elif procname == "metapc" and capa.ida.helpers.is_32bit():
elif info.procname == "metapc" and info.is_32bit():
yield Arch(ARCH_I386), NO_ADDRESS
elif procname == "metapc":
elif info.procname == "metapc":
logger.debug("unsupported architecture: non-32-bit nor non-64-bit intel")
return
else:
@@ -60,5 +61,5 @@ def extract_arch() -> Iterator[Tuple[Feature, Address]]:
# 1. handling a new architecture (e.g. aarch64)
#
# for (1), this logic will need to be updated as the format is implemented.
logger.debug("unsupported architecture: %s", procname)
logger.debug("unsupported architecture: %s", info.procname)
return

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -10,7 +10,6 @@ from typing import Any, Dict, Tuple, Iterator, Optional
import idc
import idaapi
import ida_nalt
import idautils
import ida_bytes
import ida_segment
@@ -18,52 +17,24 @@ import ida_segment
from capa.features.address import AbsoluteVirtualAddress
from capa.features.extractors.base_extractor import FunctionHandle
IDA_NALT_ENCODING = ida_nalt.get_default_encoding_idx(ida_nalt.BPU_1B) # use one byte-per-character encoding
def find_byte_sequence(start: int, end: int, seq: bytes) -> Iterator[int]:
"""yield all ea of a given byte sequence
if hasattr(ida_bytes, "parse_binpat_str"):
# TODO (mr): use find_bytes
# https://github.com/mandiant/capa/issues/2339
def find_byte_sequence(start: int, end: int, seq: bytes) -> Iterator[int]:
"""yield all ea of a given byte sequence
args:
start: min virtual address
end: max virtual address
seq: bytes to search e.g. b"\x01\x03"
"""
patterns = ida_bytes.compiled_binpat_vec_t()
seqstr = " ".join([f"{b:02x}" for b in seq])
err = ida_bytes.parse_binpat_str(patterns, 0, seqstr, 16, IDA_NALT_ENCODING)
if err:
return
while True:
ea, _ = ida_bytes.bin_search(start, end, patterns, ida_bytes.BIN_SEARCH_FORWARD)
if ea == idaapi.BADADDR:
break
start = ea + 1
yield ea
else:
# for IDA 7.5 and older; using deprecated find_binary instead of bin_search
def find_byte_sequence(start: int, end: int, seq: bytes) -> Iterator[int]:
"""yield all ea of a given byte sequence
args:
start: min virtual address
end: max virtual address
seq: bytes to search e.g. b"\x01\x03"
"""
seqstr = " ".join([f"{b:02x}" for b in seq])
while True:
ea = idaapi.find_binary(start, end, seqstr, 0, idaapi.SEARCH_DOWN)
if ea == idaapi.BADADDR:
break
start = ea + 1
yield ea
args:
start: min virtual address
end: max virtual address
seq: bytes to search e.g. b"\x01\x03"
"""
seqstr = " ".join([f"{b:02x}" for b in seq])
while True:
# TODO(mike-hunhoff): find_binary is deprecated. Please use ida_bytes.bin_search() instead.
# https://github.com/mandiant/capa/issues/1606
ea = idaapi.find_binary(start, end, seqstr, 0, idaapi.SEARCH_DOWN)
if ea == idaapi.BADADDR:
break
start = ea + 1
yield ea
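A hedged usage sketch inside IDA, scanning the .text segment for a one-byte pattern (segment lookup via ida_segment):

import ida_segment

seg = ida_segment.get_segm_by_name(".text")
for ea in find_byte_sequence(seg.start_ea, seg.end_ea, b"\xe8"):  # call rel32 opcode
    print(hex(ea))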
def get_functions(

View File

@@ -1,117 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import os
import sys
import json
import logging
import importlib.util
from typing import Optional
from pathlib import Path
logger = logging.getLogger(__name__)
def is_idalib_installed() -> bool:
try:
return importlib.util.find_spec("idapro") is not None
except ModuleNotFoundError:
return False
def get_idalib_user_config_path() -> Optional[Path]:
"""Get the path to the user's config file based on platform following IDA's user directories."""
# derived from `py-activate-idalib.py` from IDA v9.0 Beta 4
if sys.platform == "win32":
# On Windows, use the %APPDATA%\Hex-Rays\IDA Pro directory
config_dir = Path(os.getenv("APPDATA")) / "Hex-Rays" / "IDA Pro"
else:
# On macOS and Linux, use ~/.idapro
config_dir = Path.home() / ".idapro"
# Return the full path to the config file (now in JSON format)
user_config_path = config_dir / "ida-config.json"
if not user_config_path.exists():
return None
return user_config_path
def find_idalib() -> Optional[Path]:
config_path = get_idalib_user_config_path()
if not config_path:
logger.error("IDA Pro user configuration does not exist, please make sure you've installed idalib properly.")
return None
config = json.loads(config_path.read_text(encoding="utf-8"))
try:
ida_install_dir = Path(config["Paths"]["ida-install-dir"])
except KeyError:
logger.error(
"IDA Pro user configuration does not contain location of IDA Pro installation, please make sure you've installed idalib properly."
)
return None
if not ida_install_dir.exists():
return None
libname = {
"win32": "idalib.dll",
"linux": "libidalib.so",
"linux2": "libidalib.so",
"darwin": "libidalib.dylib",
}[sys.platform]
if not (ida_install_dir / "ida.hlp").is_file():
return None
if not (ida_install_dir / libname).is_file():
return None
idalib_path = ida_install_dir / "idalib" / "python"
if not idalib_path.exists():
return None
if not (idalib_path / "idapro" / "__init__.py").is_file():
return None
return idalib_path
def has_idalib() -> bool:
if is_idalib_installed():
logger.debug("found installed IDA idalib API")
return True
logger.debug("IDA idalib API not installed, searching...")
idalib_path = find_idalib()
if not idalib_path:
logger.debug("failed to find IDA idalib installation")
logger.debug("found IDA idalib API: %s", idalib_path)
return idalib_path is not None
def load_idalib() -> bool:
try:
import idapro
return True
except ImportError:
idalib_path = find_idalib()
if not idalib_path:
return False
sys.path.append(idalib_path.absolute().as_posix())
try:
import idapro # noqa: F401 unused import
return True
except ImportError:
return False
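A hedged sketch of the intended call sequence; idapro.open_database mirrors the idalib examples and is an assumption about the installed API:

if has_idalib() and load_idalib():
    import idapro
    # headlessly open and auto-analyze a database (path is hypothetical)
    idapro.open_database("sample.exe", run_auto_analysis=True)
    # ... run analysis ...
    idapro.close_database()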

View File

@@ -1,15 +1,13 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import re
from typing import Any, Dict, Tuple, Iterator, Optional
from typing import Any, Dict, Tuple, Iterator
import idc
import ida_ua
import idaapi
import idautils
@@ -37,9 +35,9 @@ def get_externs(ctx: Dict[str, Any]) -> Dict[int, Any]:
return ctx["externs_cache"]
def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Optional[Tuple[str, str]]:
def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Iterator[Any]:
"""check instruction for API call"""
info = None
info = ()
ref = insn.ea
# attempt to resolve API calls by following chained thunks to a reasonable depth
@@ -54,7 +52,7 @@ def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Optional[T
except IndexError:
break
info = funcs.get(ref)
info = funcs.get(ref, ())
if info:
break
@@ -62,7 +60,8 @@ def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Optional[T
if not f or not (f.flags & idaapi.FUNC_THUNK):
break
return info
if info:
yield info
def extract_insn_api_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
@@ -77,39 +76,16 @@ def extract_insn_api_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle)
if insn.get_canon_mnem() not in ("call", "jmp"):
return
# check call to imported functions
api = check_for_api_call(insn, get_imports(fh.ctx))
if api:
# check calls to imported functions
for api in check_for_api_call(insn, get_imports(fh.ctx)):
# tuple (<module>, <function>, <ordinal>)
for name in capa.features.extractors.helpers.generate_symbols(api[0], api[1]):
yield API(name), ih.address
# a call instruction should only call one function, stop if a call to an import is extracted
return
# check call to extern functions
api = check_for_api_call(insn, get_externs(fh.ctx))
if api:
# check calls to extern functions
for api in check_for_api_call(insn, get_externs(fh.ctx)):
# tuple (<module>, <function>, <ordinal>)
yield API(api[1]), ih.address
# a call instruction should only call one function, stop if a call to an extern is extracted
return
# extract dynamically resolved APIs stored in renamed globals (renamed for example using `renimp.idc`)
# examples: `CreateProcessA`, `HttpSendRequestA`
if insn.Op1.type == ida_ua.o_mem:
op_addr = insn.Op1.addr
op_name = idaapi.get_name(op_addr)
# when renaming a global using an API name, IDA assigns it the function type
# ensure we do not extract something wrong by checking that the address has a name and a type
# we could check that the type is a function definition, but that complicates the code
if (not op_name.startswith("off_")) and idc.get_type(op_addr):
# Remove suffix used in repeated names, for example _0 in VirtualFree_0
match = re.match(r"(.+)_\d+", op_name)
if match:
op_name = match.group(1)
# the global name does not include the DLL name, so we can't extract it
for name in capa.features.extractors.helpers.generate_symbols("", op_name):
yield API(name), ih.address
# extract IDA/FLIRT recognized API functions
targets = tuple(idautils.CodeRefsFrom(insn.ea, False))
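The suffix-stripping above normalizes IDA's duplicate-name convention (VirtualFree_0, VirtualFree_1, ...); a tiny standalone check:

import re

assert re.match(r"(.+)_\d+", "VirtualFree_0").group(1) == "VirtualFree"
assert re.match(r"(.+)_\d+", "VirtualFree") is None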

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -130,13 +130,7 @@ def extract_file_arch(pe, **kwargs):
elif pe.FILE_HEADER.Machine == pefile.MACHINE_TYPE["IMAGE_FILE_MACHINE_AMD64"]:
yield Arch(ARCH_AMD64), NO_ADDRESS
else:
try:
logger.warning(
"unsupported architecture: %s",
pefile.MACHINE_TYPE[pe.FILE_HEADER.Machine],
)
except KeyError:
logger.warning("unknown architecture: %s", pe.FILE_HEADER.Machine)
logger.warning("unsupported architecture: %s", pefile.MACHINE_TYPE[pe.FILE_HEADER.Machine])
def extract_file_features(pe, buf):

View File

@@ -1,6 +1,6 @@
# strings code from FLOSS, https://github.com/mandiant/flare-floss
#
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
@@ -113,7 +113,7 @@ def extract_insn_api_features(fh: FunctionHandle, bb, ih: InsnHandle) -> Iterato
if f.vw.metadata["Format"] == "elf":
if "symtab" not in fh.ctx["cache"]:
# the symbol table gets stored as a function's attribute in order to avoid running
# this code every time the call is made, thus preventing the computational overhead.
# this code everytime the call is made, thus preventing the computational overhead.
try:
fh.ctx["cache"]["symtab"] = SymTab.from_viv(f.vw.parsedbin)
except Exception:
@@ -598,7 +598,7 @@ def extract_op_number_features(
if f.vw.probeMemory(v, 1, envi.memory.MM_READ):
# this is a valid address
# assume it's not also a constant.
# assume its not also a constant.
return
if insn.mnem == "add" and insn.opers[0].isReg() and insn.opers[0].reg == envi.archs.i386.regs.REG_ESP:

View File

@@ -1,198 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Dict, List, Tuple, Optional
from pathlib import Path
from zipfile import ZipFile
from collections import defaultdict
from dataclasses import dataclass
from capa.exceptions import UnsupportedFormatError
from capa.features.extractors.vmray.models import File, Flog, SummaryV2, StaticData, FunctionCall, xml_to_dict
logger = logging.getLogger(__name__)
DEFAULT_ARCHIVE_PASSWORD = b"infected"
SUPPORTED_FLOG_VERSIONS = ("2",)
@dataclass
class VMRayMonitorThread:
tid: int # thread ID assigned by OS
monitor_id: int # unique ID assigned to thread by VMRay
process_monitor_id: int  # unique ID assigned to containing process by VMRay
@dataclass
class VMRayMonitorProcess:
pid: int # process ID assigned by OS
ppid: int # parent process ID assigned by OS
monitor_id: int # unique ID assigned to process by VMRay
image_name: str
class VMRayAnalysis:
def __init__(self, zipfile_path: Path):
self.zipfile = ZipFile(zipfile_path, "r")
# summary_v2.json is the entry point to the entire VMRay archive and
# we use its data to find everything else that we need for capa
self.sv2 = SummaryV2.model_validate_json(
self.zipfile.read("logs/summary_v2.json", pwd=DEFAULT_ARCHIVE_PASSWORD)
)
self.file_type: str = self.sv2.analysis_metadata.sample_type
# flog.xml contains all of the call information that VMRay captured during execution
flog_xml = self.zipfile.read("logs/flog.xml", pwd=DEFAULT_ARCHIVE_PASSWORD)
flog_dict = xml_to_dict(flog_xml)
self.flog = Flog.model_validate(flog_dict)
if self.flog.analysis.log_version not in SUPPORTED_FLOG_VERSIONS:
raise UnsupportedFormatError(
"VMRay feature extractor does not support flog version %s" % self.flog.analysis.log_version
)
self.exports: Dict[int, str] = {}
self.imports: Dict[int, Tuple[str, str]] = {}
self.sections: Dict[int, str] = {}
self.monitor_processes: Dict[int, VMRayMonitorProcess] = {}
self.monitor_threads: Dict[int, VMRayMonitorThread] = {}
# map monitor thread IDs to their associated monitor process ID
self.monitor_threads_by_monitor_process: Dict[int, List[int]] = defaultdict(list)
# map function calls to their associated monitor thread ID mapped to its associated monitor process ID
self.monitor_process_calls: Dict[int, Dict[int, List[FunctionCall]]] = defaultdict(lambda: defaultdict(list))
self.base_address: int
self.sample_file_name: Optional[str] = None
self.sample_file_analysis: Optional[File] = None
self.sample_file_static_data: Optional[StaticData] = None
self._find_sample_file()
# VMRay analysis archives come in various shapes and sizes and file type does not definitively tell us what data
# we can expect to find in the archive, so to be explicit we check for the various pieces that we need at
# minimum to run capa analysis
if self.sample_file_name is None or self.sample_file_analysis is None:
raise UnsupportedFormatError("VMRay archive does not contain sample file (file_type: %s)" % self.file_type)
if not self.sample_file_static_data:
raise UnsupportedFormatError("VMRay archive does not contain static data (file_type: %s)" % self.file_type)
if not self.sample_file_static_data.pe and not self.sample_file_static_data.elf:
raise UnsupportedFormatError(
"VMRay feature extractor only supports PE and ELF at this time (file_type: %s)" % self.file_type
)
# VMRay does not store static strings for the sample file so we must use the source file
# stored in the archive
sample_sha256: str = self.sample_file_analysis.hash_values.sha256.lower()
sample_file_path: str = f"internal/static_analyses/{sample_sha256}/objects/files/{sample_sha256}"
logger.debug("file_type: %s, file_path: %s", self.file_type, sample_file_path)
self.sample_file_buf: bytes = self.zipfile.read(sample_file_path, pwd=DEFAULT_ARCHIVE_PASSWORD)
# do not change order, it matters
self._compute_base_address()
self._compute_imports()
self._compute_exports()
self._compute_sections()
self._compute_monitor_processes()
self._compute_monitor_threads()
self._compute_monitor_process_calls()
def _find_sample_file(self):
for file_name, file_analysis in self.sv2.files.items():
if file_analysis.is_sample:
# target the sample submitted for analysis
self.sample_file_name = file_name
self.sample_file_analysis = file_analysis
if file_analysis.ref_static_data:
# like "path": ["static_data","static_data_0"] where "static_data_0" is the summary_v2 static data
# key for the file's static data
self.sample_file_static_data = self.sv2.static_data[file_analysis.ref_static_data.path[1]]
break
def _compute_base_address(self):
assert self.sample_file_static_data is not None
if self.sample_file_static_data.pe:
self.base_address = self.sample_file_static_data.pe.basic_info.image_base
def _compute_exports(self):
assert self.sample_file_static_data is not None
if self.sample_file_static_data.pe:
for export in self.sample_file_static_data.pe.exports:
self.exports[export.address] = export.api.name
def _compute_imports(self):
assert self.sample_file_static_data is not None
if self.sample_file_static_data.pe:
for module in self.sample_file_static_data.pe.imports:
for api in module.apis:
self.imports[api.address] = (module.dll, api.api.name)
def _compute_sections(self):
assert self.sample_file_static_data is not None
if self.sample_file_static_data.pe:
for pefile_section in self.sample_file_static_data.pe.sections:
self.sections[pefile_section.virtual_address] = pefile_section.name
elif self.sample_file_static_data.elf:
for elffile_section in self.sample_file_static_data.elf.sections:
self.sections[elffile_section.header.sh_addr] = elffile_section.header.sh_name
def _compute_monitor_processes(self):
for process in self.sv2.processes.values():
# we expect monitor IDs to be unique
assert process.monitor_id not in self.monitor_processes
ppid: int = (
self.sv2.processes[process.ref_parent_process.path[1]].os_pid if process.ref_parent_process else 0
)
self.monitor_processes[process.monitor_id] = VMRayMonitorProcess(
process.os_pid, ppid, process.monitor_id, process.image_name
)
# not all processes are recorded in SummaryV2.json, get missing data from flog.xml, see #2394
for monitor_process in self.flog.analysis.monitor_processes:
vmray_monitor_process: VMRayMonitorProcess = VMRayMonitorProcess(
monitor_process.os_pid,
monitor_process.os_parent_pid,
monitor_process.process_id,
monitor_process.image_name,
)
if monitor_process.process_id not in self.monitor_processes:
self.monitor_processes[monitor_process.process_id] = vmray_monitor_process
else:
# we expect monitor processes recorded in both SummaryV2.json and flog.xml to be equal
assert self.monitor_processes[monitor_process.process_id] == vmray_monitor_process
def _compute_monitor_threads(self):
for monitor_thread in self.flog.analysis.monitor_threads:
# we expect monitor IDs to be unique
assert monitor_thread.thread_id not in self.monitor_threads
self.monitor_threads[monitor_thread.thread_id] = VMRayMonitorThread(
monitor_thread.os_tid, monitor_thread.thread_id, monitor_thread.process_id
)
# we expect each monitor thread ID to be unique for its associated monitor process ID e.g. monitor
# thread ID 10 should not be captured twice for monitor process ID 1
assert monitor_thread.thread_id not in self.monitor_threads_by_monitor_process[monitor_thread.process_id]
self.monitor_threads_by_monitor_process[monitor_thread.process_id].append(monitor_thread.thread_id)
def _compute_monitor_process_calls(self):
for function_call in self.flog.analysis.function_calls:
self.monitor_process_calls[function_call.process_id][function_call.thread_id].append(function_call)
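A hedged usage sketch, assuming a VMRay analysis archive on disk (the path is hypothetical):

from pathlib import Path

analysis = VMRayAnalysis(Path("vmray_analysis.zip"))
for monitor_id, proc in analysis.monitor_processes.items():
    print(f"{monitor_id}: pid={proc.pid} ppid={proc.ppid} {proc.image_name}")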

Some files were not shown because too many files have changed in this diff.