mirror of
https://github.com/mandiant/capa.git
synced 2025-12-15 09:00:45 -08:00
Compare commits
176 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6b7582606 | ||
|
|
791f5e2359 | ||
|
|
c409b2b7ed | ||
|
|
4501955728 | ||
|
|
6b4591de14 | ||
|
|
00cce585d6 | ||
|
|
19e2097f79 | ||
|
|
b67bd4d084 | ||
|
|
854759cb43 | ||
|
|
348e0b3203 | ||
|
|
03e2195582 | ||
|
|
076bb13e2d | ||
|
|
76bd1460ba | ||
|
|
14a7bab890 | ||
|
|
8ca88d94d5 | ||
|
|
9d3f732b33 | ||
|
|
d3e3c966d6 | ||
|
|
e402aab41d | ||
|
|
c73abb8855 | ||
|
|
04071606cd | ||
|
|
19698b1ba1 | ||
|
|
25e9e18097 | ||
|
|
3a21648e78 | ||
|
|
8dcb7a473e | ||
|
|
cf91503dc3 | ||
|
|
d8691edd15 | ||
|
|
56a6f9c83e | ||
|
|
e25e68e169 | ||
|
|
728742a1ad | ||
|
|
da273824d1 | ||
|
|
7a6f63cf2b | ||
|
|
d62734ecc2 | ||
|
|
5ccb642929 | ||
|
|
8d5fcdf287 | ||
|
|
be8499238c | ||
|
|
40c7714c48 | ||
|
|
460590cec0 | ||
|
|
25d2ef30e7 | ||
|
|
71ae51ef69 | ||
|
|
216bfb968d | ||
|
|
32cb0365f8 | ||
|
|
b299e4bc1f | ||
|
|
bc2802fd72 | ||
|
|
81a14838bd | ||
|
|
1c9a86ca20 | ||
|
|
32fefa60cc | ||
|
|
09bbe80dfb | ||
|
|
239ad4a17e | ||
|
|
ab3b074c6a | ||
|
|
e863ce5ff3 | ||
|
|
8e4c0e3040 | ||
|
|
401a0ee0ff | ||
|
|
f69fabc2b0 | ||
|
|
87f691677c | ||
|
|
ea9853e667 | ||
|
|
312dd0d40f | ||
|
|
44cbe664e4 | ||
|
|
6b8e2b3e81 | ||
|
|
ba9ab7c876 | ||
|
|
1af97f6681 | ||
|
|
05575e1e92 | ||
|
|
9d137a207f | ||
|
|
850ae5a916 | ||
|
|
e8054c277d | ||
|
|
e8ea461456 | ||
|
|
bb8991af8e | ||
|
|
368f635387 | ||
|
|
287e4282a9 | ||
|
|
1f6ce48e40 | ||
|
|
7cb31cf23c | ||
|
|
01e6619182 | ||
|
|
20d7bf1402 | ||
|
|
6b8983c0c4 | ||
|
|
97bd4992b1 | ||
|
|
843fd34737 | ||
|
|
dfc19d8cb2 | ||
|
|
1564f24330 | ||
|
|
0d87bb0504 | ||
|
|
db423d9b0a | ||
|
|
ebfba543e6 | ||
|
|
46c464282e | ||
|
|
aa225dac5c | ||
|
|
c2376eaf7b | ||
|
|
6451fa433b | ||
|
|
765c7cb792 | ||
|
|
b675c9a77c | ||
|
|
ac081336ba | ||
|
|
a15eb835f4 | ||
|
|
fcdaabf34e | ||
|
|
283aa27152 | ||
|
|
f856ea7454 | ||
|
|
ebb778ae0d | ||
|
|
e9e5d2bb12 | ||
|
|
bb1ef6ca56 | ||
|
|
7e64306f1c | ||
|
|
6b19e7b372 | ||
|
|
bb60099ab6 | ||
|
|
d609203fcd | ||
|
|
fcf200f13f | ||
|
|
7cb93c8ebd | ||
|
|
eb69b383a4 | ||
|
|
04d127f69f | ||
|
|
9dd39926d7 | ||
|
|
13d14f6cb6 | ||
|
|
260da8ed2c | ||
|
|
a6884db1d3 | ||
|
|
67d3916c41 | ||
|
|
b0ffc86399 | ||
|
|
07b4e1f8a2 | ||
|
|
4137923c2e | ||
|
|
33be4d1f8e | ||
|
|
8e9eadf98a | ||
|
|
9107819cf1 | ||
|
|
b74738adcf | ||
|
|
b229048b51 | ||
|
|
4fe7f784e9 | ||
|
|
b7b8792f70 | ||
|
|
e637e5a09e | ||
|
|
0ea6f1e270 | ||
|
|
f6bc42540c | ||
|
|
62701a2837 | ||
|
|
f60e3fc531 | ||
|
|
b6f0ee539b | ||
|
|
e70e1b0641 | ||
|
|
b9c4cc681b | ||
|
|
13261d0c41 | ||
|
|
8476aeee35 | ||
|
|
38cf1f1041 | ||
|
|
d81b123e97 | ||
|
|
029259b8ed | ||
|
|
e3f695b947 | ||
|
|
d25c86c08b | ||
|
|
4aad53c5b3 | ||
|
|
0028da5270 | ||
|
|
cf3494d427 | ||
|
|
3f33b82ace | ||
|
|
12f1851ba5 | ||
|
|
6da0e5d985 | ||
|
|
e2e84f7f50 | ||
|
|
106c31735e | ||
|
|
277e9d1551 | ||
|
|
9db01e340c | ||
|
|
626ea51c20 | ||
|
|
fd686ac591 | ||
|
|
17aab2c4fc | ||
|
|
216ac8dd96 | ||
|
|
d68e057439 | ||
|
|
3c2749734c | ||
|
|
5c60efa81f | ||
|
|
09d86245e5 | ||
|
|
2862cb35c2 | ||
|
|
c3aa306d6c | ||
|
|
6bec5d40bd | ||
|
|
da6c6cfb48 | ||
|
|
9353e46615 | ||
|
|
76913af20b | ||
|
|
bb86d1485c | ||
|
|
cd3086cfa4 | ||
|
|
120f34e8ef | ||
|
|
5495a8555c | ||
|
|
1a447013bd | ||
|
|
fccb533841 | ||
|
|
3b165c3d8e | ||
|
|
cd5199f873 | ||
|
|
202b5ddae7 | ||
|
|
0b70abca93 | ||
|
|
6de22a0264 | ||
|
|
fd811d1387 | ||
|
|
b617179525 | ||
|
|
28fc671ad5 | ||
|
|
e1b750f1e9 | ||
|
|
1ec680856d | ||
|
|
d79ea074f2 | ||
|
|
e68bcddfe0 | ||
|
|
4929d5936e | ||
|
|
1975b6455c |
8
.github/ruff.toml
vendored
8
.github/ruff.toml
vendored
@@ -1,16 +1,16 @@
|
||||
# Enable the pycodestyle (`E`) and Pyflakes (`F`) rules by default.
|
||||
# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
|
||||
# McCabe complexity (`C901`) by default.
|
||||
select = ["E", "F"]
|
||||
lint.select = ["E", "F"]
|
||||
|
||||
# Allow autofix for all enabled rules (when `--fix`) is provided.
|
||||
fixable = ["ALL"]
|
||||
unfixable = []
|
||||
lint.fixable = ["ALL"]
|
||||
lint.unfixable = []
|
||||
|
||||
# E402 module level import not at top of file
|
||||
# E722 do not use bare 'except'
|
||||
# E501 line too long
|
||||
ignore = ["E402", "E722", "E501"]
|
||||
lint.ignore = ["E402", "E722", "E501"]
|
||||
|
||||
line-length = 120
|
||||
|
||||
|
||||
6
.github/workflows/build.yml
vendored
6
.github/workflows/build.yml
vendored
@@ -3,6 +3,10 @@ name: build
|
||||
on:
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths-ignore:
|
||||
- 'web/**'
|
||||
- 'doc/**'
|
||||
- '**.md'
|
||||
release:
|
||||
types: [edited, published]
|
||||
|
||||
@@ -32,7 +36,7 @@ jobs:
|
||||
artifact_name: capa.exe
|
||||
asset_name: windows
|
||||
python_version: 3.8
|
||||
- os: macos-11
|
||||
- os: macos-12
|
||||
# use older macOS for assumed better portability
|
||||
artifact_name: capa
|
||||
asset_name: macos
|
||||
|
||||
16
.github/workflows/tests.yml
vendored
16
.github/workflows/tests.yml
vendored
@@ -1,10 +1,22 @@
|
||||
name: CI
|
||||
|
||||
# tests.yml workflow will run for all changes except:
|
||||
# any file or directory under web/ or doc/
|
||||
# any Markdown (.md) file anywhere in the repository
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths-ignore:
|
||||
- 'web/**'
|
||||
- 'doc/**'
|
||||
- '**.md'
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths-ignore:
|
||||
- 'web/**'
|
||||
- 'doc/**'
|
||||
- '**.md'
|
||||
|
||||
permissions: read-all
|
||||
|
||||
@@ -76,7 +88,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-20.04, windows-2019, macos-11]
|
||||
os: [ubuntu-20.04, windows-2019, macos-12]
|
||||
# across all operating systems
|
||||
python-version: ["3.8", "3.11"]
|
||||
include:
|
||||
@@ -119,7 +131,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.8", "3.11"]
|
||||
python-version: ["3.9", "3.11"]
|
||||
steps:
|
||||
- name: Checkout capa with submodules
|
||||
# do only run if BN_SERIAL is available, have to do this in every step, see https://github.com/orgs/community/discussions/26726#discussioncomment-3253118
|
||||
|
||||
89
.github/workflows/web-deploy.yml
vendored
Normal file
89
.github/workflows/web-deploy.yml
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
name: deploy web to GitHub Pages
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, "wb/webui-actions-1" ]
|
||||
paths:
|
||||
- 'web/**'
|
||||
|
||||
# Allows to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
# Sets the GITHUB_TOKEN permissions to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
# Allow one concurrent deployment
|
||||
concurrency:
|
||||
group: 'pages'
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-landing-page:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: landing-page
|
||||
path: './web/public'
|
||||
|
||||
build-explorer:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: 'recursive'
|
||||
fetch-depth: 1
|
||||
show-progress: true
|
||||
- name: Set up Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: 'npm'
|
||||
cache-dependency-path: './web/explorer/package-lock.json'
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
working-directory: ./web/explorer
|
||||
- name: Generate release bundle
|
||||
run: npm run build:bundle
|
||||
working-directory: ./web/explorer
|
||||
- name: Zip release bundle
|
||||
run: zip -r public/capa-explorer-web.zip capa-explorer-web
|
||||
working-directory: ./web/explorer
|
||||
- name: Build
|
||||
run: npm run build
|
||||
working-directory: ./web/explorer
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: explorer
|
||||
path: './web/explorer/dist'
|
||||
|
||||
deploy:
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-landing-page, build-explorer]
|
||||
steps:
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: landing-page
|
||||
path: './public/'
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: explorer
|
||||
path: './public/explorer'
|
||||
- name: Setup Pages
|
||||
uses: actions/configure-pages@v4
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: './public'
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
42
.github/workflows/web-tests.yml
vendored
Normal file
42
.github/workflows/web-tests.yml
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
name: Capa Explorer Web tests
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- 'web/explorer/**'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: 'recursive'
|
||||
fetch-depth: 1
|
||||
show-progress: true
|
||||
|
||||
- name: Set up Node
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: 20
|
||||
cache: 'npm'
|
||||
cache-dependency-path: './web/explorer/package-lock.json'
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
working-directory: ./web/explorer
|
||||
|
||||
- name: Lint
|
||||
run: npm run lint
|
||||
working-directory: ./web/explorer
|
||||
|
||||
- name: Format
|
||||
run: npm run format:check
|
||||
working-directory: ./web/explorer
|
||||
|
||||
- name: Run unit tests
|
||||
run: npm run test
|
||||
working-directory: ./web/explorer
|
||||
4
.gitmodules
vendored
4
.gitmodules
vendored
@@ -1,6 +1,6 @@
|
||||
[submodule "rules"]
|
||||
path = rules
|
||||
url = ../capa-rules.git
|
||||
url = ../../mandiant/capa-rules.git
|
||||
[submodule "tests/data"]
|
||||
path = tests/data
|
||||
url = ../capa-testfiles.git
|
||||
url = ../../mandiant/capa-testfiles.git
|
||||
|
||||
51
CHANGELOG.md
51
CHANGELOG.md
@@ -17,8 +17,54 @@
|
||||
### Development
|
||||
|
||||
### Raw diffs
|
||||
- [capa v7.1.0...master](https://github.com/mandiant/capa/compare/v7.1.0...master)
|
||||
- [capa-rules v7.1.0...master](https://github.com/mandiant/capa-rules/compare/v7.1.0...master)
|
||||
- [capa v7.2.0...master](https://github.com/mandiant/capa/compare/v7.2.0...master)
|
||||
- [capa-rules v7.2.0...master](https://github.com/mandiant/capa-rules/compare/v7.2.0...master)
|
||||
|
||||
### v7.2.0
|
||||
capa v7.2.0 introduces a first version of capa explorer web: a web-based user interface to inspect capa results using your browser. Users can inspect capa result JSON documents in an online web instance or a standalone HTML page for offline usage. capa explorer supports interactive exploring of capa results to make it easier to understand them. Users can filter, sort, and see the details of all identified capabilities. capa explorer web was worked on by @s-ff as part of a [GSoC project](https://summerofcode.withgoogle.com/programs/2024/projects/cR3hjbsq), and it is available at https://mandiant.github.io/capa/explorer/#/.
|
||||
|
||||
This release also adds a feature extractor for output from the DRAKVUF sandbox. Now, analysts can pass the resulting `drakmon.log` file to capa and extract capabilities from the artifacts captured by the sandbox. This feature extractor will also be added to the DRAKVUF sandbox as a post-processing script, and it was worked on by @yelhamer as part of a [GSoC project](https://summerofcode.withgoogle.com/programs/2024/projects/fCnBGuEC).
|
||||
|
||||
Additionally, we fixed several bugs handling ELF files, and added the ability to filter capa analysis by functions or processes. We also added support to the IDA Pro extractor to leverage analyst recovered API names.
|
||||
|
||||
Special thanks to our repeat and new contributors:
|
||||
* @lakshayletsgo for their first contribution in https://github.com/mandiant/capa/pull/2248
|
||||
* @msm-cert for their first contribution in https://github.com/mandiant/capa/pull/2143
|
||||
* @VascoSch92 for their first contribution in https://github.com/mandiant/capa/pull/2143
|
||||
|
||||
### New Features
|
||||
|
||||
- webui: explore capa analysis results in a web-based UI online and offline #2224 @s-ff
|
||||
- support analyzing DRAKVUF traces #2143 @yelhamer
|
||||
- IDA extractor: extract names from dynamically resolved APIs stored in renamed global variables #2201 @Ana06
|
||||
- cli: add the ability to select which specific functions or processes to analyze @yelhamer
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
### New Rules (5)
|
||||
|
||||
- nursery/upload-file-to-onedrive jaredswilson@google.com ervinocampo@google.com
|
||||
- data-manipulation/encoding/base64/decode-data-using-base64-via-vbmi-lookup-table still@teamt5.org
|
||||
- communication/socket/attach-bpf-to-socket-on-linux jakub.jozwiak@mandiant.com
|
||||
- anti-analysis/anti-av/overwrite-dll-text-section-to-remove-hooks jakub.jozwiak@mandiant.com
|
||||
- nursery/delete-file-on-linux mehunhoff@google.com
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- elf: extract import / export symbols from stripped binaries #2096 @ygasparis
|
||||
- elf: fix handling of symbols in corrupt ELF files #2226 @williballenthin
|
||||
|
||||
### capa explorer IDA Pro plugin
|
||||
|
||||
### Development
|
||||
- CI: use macos-12 since macos-11 is deprecated and will be removed on June 28th, 2024 #2173 @mr-tz
|
||||
- CI: update Binary Ninja version to 4.1 and use Python 3.9 to test it #2211 @xusheng6
|
||||
- CI: update tests.yml workflow to exclude web and documentation files #2263 @s-ff
|
||||
- CI: update build.yml workflow to exclude web and documentation files #2270 @s-ff
|
||||
|
||||
### Raw diffs
|
||||
- [capa v7.1.0...7.2.0](https://github.com/mandiant/capa/compare/v7.1.0...7.2.0)
|
||||
- [capa-rules v7.1.0...7.2.0](https://github.com/mandiant/capa-rules/compare/v7.1.0...7.2.0)
|
||||
|
||||
## v7.1.0
|
||||
The v7.1.0 release brings large performance improvements to capa's rule matching engine.
|
||||
@@ -83,7 +129,6 @@ Special thanks to our repeat and new contributors:
|
||||
- binja: fix and simplify stack string detection code after binja 4.0 @xusheng6
|
||||
- binja: add support for forwarded export #1646 @xusheng6
|
||||
- cape: support more report formats #2035 @mr-tz
|
||||
- elf: extract import / export symbols from stripped binaries #2096 @ygasparis
|
||||
|
||||
### capa explorer IDA Pro plugin
|
||||
- replace deprecated IDA API find_binary with bin_search #1606 @s-ff
|
||||
|
||||
45
README.md
45
README.md
@@ -11,13 +11,13 @@ capa detects capabilities in executable files.
|
||||
You run it against a PE, ELF, .NET module, shellcode file, or a sandbox report and it tells you what it thinks the program can do.
|
||||
For example, it might suggest that the file is a backdoor, is capable of installing services, or relies on HTTP to communicate.
|
||||
|
||||
Check out our capa blog posts:
|
||||
- [Dynamic capa: Exploring Executable Run-Time Behavior with the CAPE Sandbox](https://www.mandiant.com/resources/blog/dynamic-capa-executable-behavior-cape-sandbox)
|
||||
- [capa v4: casting a wider .NET](https://www.mandiant.com/resources/blog/capa-v4-casting-wider-net) (.NET support)
|
||||
- [ELFant in the Room – capa v3](https://www.mandiant.com/resources/elfant-in-the-room-capa-v3) (ELF support)
|
||||
- [capa 2.0: Better, Stronger, Faster](https://www.mandiant.com/resources/capa-2-better-stronger-faster)
|
||||
- [capa: Automatically Identify Malware Capabilities](https://www.mandiant.com/resources/capa-automatically-identify-malware-capabilities)
|
||||
To interactively inspect capa results in your browser use the [capa web explorer](https://mandiant.github.io/capa/explorer/).
|
||||
|
||||
If you want to inspect or write capa rules, head on over to the [capa-rules repository](https://github.com/mandiant/capa-rules). Otherwise, keep reading.
|
||||
|
||||
Below you find a list of [our capa blog posts with more details.](#blog-posts)
|
||||
|
||||
# example capa output
|
||||
```
|
||||
$ capa.exe suspicious.exe
|
||||
|
||||
@@ -72,16 +72,23 @@ Download stable releases of the standalone capa binaries [here](https://github.c
|
||||
|
||||
To use capa as a library or integrate with another tool, see [doc/installation.md](https://github.com/mandiant/capa/blob/master/doc/installation.md) for further setup instructions.
|
||||
|
||||
For more information about how to use capa, see [doc/usage.md](https://github.com/mandiant/capa/blob/master/doc/usage.md).
|
||||
# web explorer
|
||||
The [capa web explorer](https://mandiant.github.io/capa/explorer/) enables you to interactively explore capa results in your web browser. Besides the online version you can download a standalone HTML file for local offline usage.
|
||||
|
||||

|
||||
|
||||
More details on the web UI is available in the [capa web explorer README](https://github.com/mandiant/capa/blob/master/web/explorer/README.md).
|
||||
|
||||
# example
|
||||
|
||||
In the above sample output, we ran capa against an unknown binary (`suspicious.exe`),
|
||||
and the tool reported that the program can send HTTP requests, decode data via XOR and Base64,
|
||||
In the above sample output, we run capa against an unknown binary (`suspicious.exe`),
|
||||
and the tool reports that the program can send HTTP requests, decode data via XOR and Base64,
|
||||
install services, and spawn new processes.
|
||||
Taken together, this makes us think that `suspicious.exe` could be a persistent backdoor.
|
||||
Therefore, our next analysis step might be to run `suspicious.exe` in a sandbox and try to recover the command and control server.
|
||||
|
||||
## detailed results
|
||||
|
||||
By passing the `-vv` flag (for very verbose), capa reports exactly where it found evidence of these capabilities.
|
||||
This is useful for at least two reasons:
|
||||
|
||||
@@ -126,8 +133,11 @@ function @ 0x4011C0
|
||||
...
|
||||
```
|
||||
|
||||
Additionally, capa also supports analyzing [CAPE](https://github.com/kevoreilly/CAPEv2) sandbox reports for dynamic capability extraction.
|
||||
In order to use this, you first submit your sample to CAPE for analysis, and then run capa against the generated report (JSON).
|
||||
## analyzing sandbox reports
|
||||
Additionally, capa also supports analyzing sandbox reports for dynamic capability extraction.
|
||||
In order to use this, you first submit your sample to one of supported sandboxes for analysis, and then run capa against the generated report file.
|
||||
|
||||
Currently, capa supports the [CAPE sandbox](https://github.com/kevoreilly/CAPEv2) and the [DRAKVUF sandbox](https://github.com/CERT-Polska/drakvuf-sandbox/). In order to use either, simply run capa against the generated file (JSON for CAPE or LOG for DRAKVUF sandbox) and it will automatically detect the sandbox and extract capabilities from it.
|
||||
|
||||
Here's an example of running capa against a packed binary, and then running capa against the CAPE report of that binary:
|
||||
|
||||
@@ -216,6 +226,7 @@ $ capa 05be49819139a3fdcdbddbdefd298398779521f3d68daa25275cc77508e42310.json
|
||||
┕━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┙
|
||||
```
|
||||
|
||||
# capa rules
|
||||
capa uses a collection of rules to identify capabilities within a program.
|
||||
These rules are easy to write, even for those new to reverse engineering.
|
||||
By authoring rules, you can extend the capabilities that capa recognizes.
|
||||
@@ -252,18 +263,28 @@ rule:
|
||||
- property/read: System.Net.Sockets.TcpClient::Client
|
||||
```
|
||||
|
||||
The [github.com/mandiant/capa-rules](https://github.com/mandiant/capa-rules) repository contains hundreds of standard library rules that are distributed with capa.
|
||||
The [github.com/mandiant/capa-rules](https://github.com/mandiant/capa-rules) repository contains hundreds of standard rules that are distributed with capa.
|
||||
Please learn to write rules and contribute new entries as you find interesting techniques in malware.
|
||||
|
||||
# IDA Pro plugin: capa explorer
|
||||
If you use IDA Pro, then you can use the [capa explorer](https://github.com/mandiant/capa/tree/master/capa/ida/plugin) plugin.
|
||||
capa explorer helps you identify interesting areas of a program and build new capa rules using features extracted directly from your IDA Pro database.
|
||||
It also uses your local changes to the .idb to extract better features, such as when you rename a global variable that contains a dynamically resolved API address.
|
||||
|
||||

|
||||
|
||||
# Ghidra integration
|
||||
If you use Ghidra, then you can use the [capa + Ghidra integration](/capa/ghidra/) to run capa's analysis directly on your Ghidra database and render the results in Ghidra's user interface.
|
||||
|
||||
<img src="https://github.com/mandiant/capa/assets/66766340/eeae33f4-99d4-42dc-a5e8-4c1b8c661492" width=300>
|
||||
|
||||
# blog posts
|
||||
- [Dynamic capa: Exploring Executable Run-Time Behavior with the CAPE Sandbox](https://www.mandiant.com/resources/blog/dynamic-capa-executable-behavior-cape-sandbox)
|
||||
- [capa v4: casting a wider .NET](https://www.mandiant.com/resources/blog/capa-v4-casting-wider-net) (.NET support)
|
||||
- [ELFant in the Room – capa v3](https://www.mandiant.com/resources/elfant-in-the-room-capa-v3) (ELF support)
|
||||
- [capa 2.0: Better, Stronger, Faster](https://www.mandiant.com/resources/capa-2-better-stronger-faster)
|
||||
- [capa: Automatically Identify Malware Capabilities](https://www.mandiant.com/resources/capa-automatically-identify-malware-capabilities)
|
||||
|
||||
# further information
|
||||
## capa
|
||||
- [Installation](https://github.com/mandiant/capa/blob/master/doc/installation.md)
|
||||
|
||||
@@ -23,3 +23,15 @@ class UnsupportedOSError(ValueError):
|
||||
|
||||
class EmptyReportError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidArgument(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class NonExistantFunctionError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class NonExistantProcessError(ValueError):
|
||||
pass
|
||||
|
||||
@@ -461,6 +461,7 @@ FORMAT_AUTO = "auto"
|
||||
FORMAT_SC32 = "sc32"
|
||||
FORMAT_SC64 = "sc64"
|
||||
FORMAT_CAPE = "cape"
|
||||
FORMAT_DRAKVUF = "drakvuf"
|
||||
FORMAT_FREEZE = "freeze"
|
||||
FORMAT_RESULT = "result"
|
||||
STATIC_FORMATS = {
|
||||
@@ -474,6 +475,7 @@ STATIC_FORMATS = {
|
||||
}
|
||||
DYNAMIC_FORMATS = {
|
||||
FORMAT_CAPE,
|
||||
FORMAT_DRAKVUF,
|
||||
FORMAT_FREEZE,
|
||||
FORMAT_RESULT,
|
||||
}
|
||||
|
||||
@@ -9,7 +9,9 @@
|
||||
import abc
|
||||
import hashlib
|
||||
import dataclasses
|
||||
from typing import Any, Dict, Tuple, Union, Iterator
|
||||
from copy import copy
|
||||
from types import MethodType
|
||||
from typing import Any, Set, Dict, Tuple, Union, Iterator
|
||||
from dataclasses import dataclass
|
||||
|
||||
# TODO(williballenthin): use typing.TypeAlias directly when Python 3.9 is deprecated
|
||||
@@ -296,6 +298,22 @@ class StaticFeatureExtractor:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
def FunctionFilter(extractor: StaticFeatureExtractor, functions: Set) -> StaticFeatureExtractor:
|
||||
original_get_functions = extractor.get_functions
|
||||
|
||||
def filtered_get_functions(self):
|
||||
yield from (f for f in original_get_functions() if f.address in functions)
|
||||
|
||||
# we make a copy of the original extractor object and then update its get_functions() method with the decorated filter one.
|
||||
# this is in order to preserve the original extractor object's get_functions() method, in case it is used elsewhere in the code.
|
||||
# an example where this is important is in our testfiles where we may use the same extractor object with different tests,
|
||||
# with some of these tests needing to install a functions filter on the extractor object.
|
||||
new_extractor = copy(extractor)
|
||||
new_extractor.get_functions = MethodType(filtered_get_functions, extractor) # type: ignore
|
||||
|
||||
return new_extractor
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProcessHandle:
|
||||
"""
|
||||
@@ -467,4 +485,20 @@ class DynamicFeatureExtractor:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
def ProcessFilter(extractor: DynamicFeatureExtractor, processes: Set) -> DynamicFeatureExtractor:
|
||||
original_get_processes = extractor.get_processes
|
||||
|
||||
def filtered_get_processes(self):
|
||||
yield from (f for f in original_get_processes() if f.address.pid in processes)
|
||||
|
||||
# we make a copy of the original extractor object and then update its get_processes() method with the decorated filter one.
|
||||
# this is in order to preserve the original extractor object's get_processes() method, in case it is used elsewhere in the code.
|
||||
# an example where this is important is in our testfiles where we may use the same extractor object with different tests,
|
||||
# with some of these tests needing to install a processes filter on the extractor object.
|
||||
new_extractor = copy(extractor)
|
||||
new_extractor.get_processes = MethodType(filtered_get_processes, extractor) # type: ignore
|
||||
|
||||
return new_extractor
|
||||
|
||||
|
||||
FeatureExtractor: TypeAlias = Union[StaticFeatureExtractor, DynamicFeatureExtractor]
|
||||
|
||||
@@ -28,7 +28,7 @@ from capa.features.extractors.base_extractor import (
|
||||
|
||||
class BinjaFeatureExtractor(StaticFeatureExtractor):
|
||||
def __init__(self, bv: binja.BinaryView):
|
||||
super().__init__(hashes=SampleHashes.from_bytes(bv.file.raw.read(0, len(bv.file.raw))))
|
||||
super().__init__(hashes=SampleHashes.from_bytes(bv.file.raw.read(0, bv.file.raw.length)))
|
||||
self.bv = bv
|
||||
self.global_features: List[Tuple[Feature, Address]] = []
|
||||
self.global_features.extend(capa.features.extractors.binja.file.extract_file_format(self.bv))
|
||||
|
||||
@@ -48,7 +48,7 @@ def extract_format(report: CapeReport) -> Iterator[Tuple[Feature, Address]]:
|
||||
else:
|
||||
logger.warning("unknown file format, file command output: %s", report.target.file.type)
|
||||
raise ValueError(
|
||||
"unrecognized file format from the CAPE report; output of file command: {report.target.file.type}"
|
||||
f"unrecognized file format from the CAPE report; output of file command: {report.target.file.type}"
|
||||
)
|
||||
|
||||
|
||||
@@ -73,7 +73,7 @@ def extract_os(report: CapeReport) -> Iterator[Tuple[Feature, Address]]:
|
||||
else:
|
||||
# if the operating system information is missing from the cape report, it's likely a bug
|
||||
logger.warning("unrecognized OS: %s", file_output)
|
||||
raise ValueError("unrecognized OS from the CAPE report; output of file command: {file_output}")
|
||||
raise ValueError(f"unrecognized OS from the CAPE report; output of file command: {file_output}")
|
||||
else:
|
||||
# the sample is shellcode
|
||||
logger.debug("unsupported file format, file command output: %s", file_output)
|
||||
|
||||
56
capa/features/extractors/drakvuf/call.py
Normal file
56
capa/features/extractors/drakvuf/call.py
Normal file
@@ -0,0 +1,56 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Tuple, Iterator
|
||||
|
||||
from capa.features.insn import API, Number
|
||||
from capa.features.common import String, Feature
|
||||
from capa.features.address import Address
|
||||
from capa.features.extractors.base_extractor import CallHandle, ThreadHandle, ProcessHandle
|
||||
from capa.features.extractors.drakvuf.models import Call
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def extract_call_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:
|
||||
"""
|
||||
This method extracts the given call's features (such as API name and arguments),
|
||||
and returns them as API, Number, and String features.
|
||||
|
||||
args:
|
||||
ph: process handle (for defining the extraction scope)
|
||||
th: thread handle (for defining the extraction scope)
|
||||
ch: call handle (for defining the extraction scope)
|
||||
|
||||
yields:
|
||||
Feature, address; where Feature is either: API, Number, or String.
|
||||
"""
|
||||
call: Call = ch.inner
|
||||
|
||||
# list similar to disassembly: arguments right-to-left, call
|
||||
for arg_value in reversed(call.arguments.values()):
|
||||
try:
|
||||
yield Number(int(arg_value, 0)), ch.address
|
||||
except ValueError:
|
||||
# DRAKVUF automatically resolves the contents of memory addresses, (e.g. Arg1="0xc6f217efe0:\"ntdll.dll\"").
|
||||
# For those cases we yield the entire string as it, since yielding the address only would
|
||||
# likely not provide any matches, and yielding just the memory contentswould probably be misleading,
|
||||
# but yielding the entire string would be helpful for an analyst looking at the verbose output
|
||||
yield String(arg_value), ch.address
|
||||
|
||||
yield API(call.name), ch.address
|
||||
|
||||
|
||||
def extract_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:
|
||||
for handler in CALL_HANDLERS:
|
||||
for feature, addr in handler(ph, th, ch):
|
||||
yield feature, addr
|
||||
|
||||
|
||||
CALL_HANDLERS = (extract_call_features,)
|
||||
96
capa/features/extractors/drakvuf/extractor.py
Normal file
96
capa/features/extractors/drakvuf/extractor.py
Normal file
@@ -0,0 +1,96 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Tuple, Union, Iterator
|
||||
|
||||
import capa.features.extractors.drakvuf.call
|
||||
import capa.features.extractors.drakvuf.file
|
||||
import capa.features.extractors.drakvuf.thread
|
||||
import capa.features.extractors.drakvuf.global_
|
||||
import capa.features.extractors.drakvuf.process
|
||||
from capa.features.common import Feature, Characteristic
|
||||
from capa.features.address import NO_ADDRESS, Address, ThreadAddress, ProcessAddress, AbsoluteVirtualAddress, _NoAddress
|
||||
from capa.features.extractors.base_extractor import (
|
||||
CallHandle,
|
||||
SampleHashes,
|
||||
ThreadHandle,
|
||||
ProcessHandle,
|
||||
DynamicFeatureExtractor,
|
||||
)
|
||||
from capa.features.extractors.drakvuf.models import Call, DrakvufReport
|
||||
from capa.features.extractors.drakvuf.helpers import index_calls
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DrakvufExtractor(DynamicFeatureExtractor):
    """Dynamic feature extractor over a parsed DRAKVUF sandbox report (syscall/apimon traces)."""

    def __init__(self, report: DrakvufReport):
        super().__init__(
            # DRAKVUF currently does not yield hash information about the sample in its output
            hashes=SampleHashes(md5="", sha1="", sha256="")
        )

        self.report: DrakvufReport = report

        # index the api calls by process and thread (sorted by timestamp) to
        # prevent going through the entire call list at each scope.
        self.sorted_calls: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]] = index_calls(report)

        # pre-compute these because we'll yield them at *every* scope.
        self.global_features = list(capa.features.extractors.drakvuf.global_.extract_features(self.report))

    def get_base_address(self) -> Union[AbsoluteVirtualAddress, _NoAddress, None]:
        # DRAKVUF currently does not yield information about the PE's address
        return NO_ADDRESS

    def extract_global_features(self) -> Iterator[Tuple[Feature, Address]]:
        yield from self.global_features

    def extract_file_features(self) -> Iterator[Tuple[Feature, Address]]:
        yield from capa.features.extractors.drakvuf.file.extract_features(self.report)

    def get_processes(self) -> Iterator[ProcessHandle]:
        yield from capa.features.extractors.drakvuf.file.get_processes(self.sorted_calls)

    def extract_process_features(self, ph: ProcessHandle) -> Iterator[Tuple[Feature, Address]]:
        yield from capa.features.extractors.drakvuf.process.extract_features(ph)

    def get_process_name(self, ph: ProcessHandle) -> str:
        return ph.inner["process_name"]

    def get_threads(self, ph: ProcessHandle) -> Iterator[ThreadHandle]:
        yield from capa.features.extractors.drakvuf.process.get_threads(self.sorted_calls, ph)

    def extract_thread_features(self, ph: ProcessHandle, th: ThreadHandle) -> Iterator[Tuple[Feature, Address]]:
        # DRAKVUF reports do not yield any thread-scope features;
        # yield from an empty tuple so this remains a generator function.
        yield from ()

    def get_calls(self, ph: ProcessHandle, th: ThreadHandle) -> Iterator[CallHandle]:
        yield from capa.features.extractors.drakvuf.thread.get_calls(self.sorted_calls, ph, th)

    def get_call_name(self, ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> str:
        """Render the call as `Name(arg=value, ...)`, with a ` -> retval` suffix when available."""
        call: Call = ch.inner
        rendered_args = ", ".join(f"{arg_name}={arg_value}" for arg_name, arg_value in call.arguments.items())
        # SysCalls don't have a return value, while WinApi calls do;
        # only render the return-value suffix when one is actually present,
        # instead of emitting a dangling " -> " for syscalls.
        return_value = getattr(call, "return_value", None)
        suffix = f" -> {return_value}" if return_value is not None else ""
        return f"{call.name}({rendered_args}){suffix}"

    def extract_call_features(
        self, ph: ProcessHandle, th: ThreadHandle, ch: CallHandle
    ) -> Iterator[Tuple[Feature, Address]]:
        yield from capa.features.extractors.drakvuf.call.extract_features(ph, th, ch)

    @classmethod
    def from_report(cls, report: Iterator[Dict]) -> "DrakvufExtractor":
        """Parse a raw DRAKVUF JSONL record stream and build an extractor over it."""
        dr = DrakvufReport.from_raw_report(report)
        return cls(report=dr)
|
||||
56
capa/features/extractors/drakvuf/file.py
Normal file
56
capa/features/extractors/drakvuf/file.py
Normal file
@@ -0,0 +1,56 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Tuple, Iterator
|
||||
|
||||
from capa.features.file import Import
|
||||
from capa.features.common import Feature
|
||||
from capa.features.address import Address, ThreadAddress, ProcessAddress, AbsoluteVirtualAddress
|
||||
from capa.features.extractors.helpers import generate_symbols
|
||||
from capa.features.extractors.base_extractor import ProcessHandle
|
||||
from capa.features.extractors.drakvuf.models import Call, DrakvufReport
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_processes(calls: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]]) -> Iterator[ProcessHandle]:
    """
    Get all the created processes for a sample.
    """
    for proc_addr, threads in calls.items():
        # every call recorded under a process carries the process name,
        # so sample the first call of the first thread.
        first_call = next(iter(threads.values()))[0]
        yield ProcessHandle(proc_addr, inner={"process_name": first_call.process_name})
|
||||
|
||||
|
||||
def extract_import_names(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
    """
    Extract imported function names.
    """
    if report.loaded_dlls is None:
        return

    for dll in report.loaded_dlls:
        # strip any leading path components, keeping only the DLL's file name
        dll_base_name = dll.name.rpartition("\\")[2]
        for function_name, function_address in dll.imports.items():
            for symbol in generate_symbols(dll_base_name, function_name, include_dll=True):
                yield Import(symbol), AbsoluteVirtualAddress(function_address)
|
||||
|
||||
|
||||
def extract_features(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
    """Run every registered file-scope handler over the report and yield its features."""
    for handler in FILE_HANDLERS:
        yield from handler(report)


FILE_HANDLERS = (
    # TODO(yelhamer): extract more file features from other DRAKVUF plugins
    # https://github.com/mandiant/capa/issues/2169
    extract_import_names,
)
|
||||
44
capa/features/extractors/drakvuf/global_.py
Normal file
44
capa/features/extractors/drakvuf/global_.py
Normal file
@@ -0,0 +1,44 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Tuple, Iterator
|
||||
|
||||
from capa.features.common import OS, FORMAT_PE, ARCH_AMD64, OS_WINDOWS, Arch, Format, Feature
|
||||
from capa.features.address import NO_ADDRESS, Address
|
||||
from capa.features.extractors.drakvuf.models import DrakvufReport
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def extract_format(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
    """Yield the input file format as a global feature."""
    # DRAKVUF sandbox currently supports only PE files: https://drakvuf-sandbox.readthedocs.io/en/latest/usage/getting_started.html
    # (the original comment here described the guest OS — it was swapped with extract_os's comment)
    yield Format(FORMAT_PE), NO_ADDRESS
|
||||
|
||||
|
||||
def extract_os(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
    """Yield the guest operating system as a global feature."""
    # DRAKVUF sandbox currently supports only Windows as the guest: https://drakvuf-sandbox.readthedocs.io/en/latest/usage/getting_started.html
    # (the original comment here described the file format — it was swapped with extract_format's comment)
    yield OS(OS_WINDOWS), NO_ADDRESS
|
||||
|
||||
|
||||
def extract_arch(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
    """Yield the guest architecture as a global feature."""
    # DRAKVUF sandbox currently supports only x64 Windows as the guest:
    # https://drakvuf-sandbox.readthedocs.io/en/latest/usage/getting_started.html
    yield from ((Arch(ARCH_AMD64), NO_ADDRESS),)
|
||||
|
||||
|
||||
def extract_features(report: DrakvufReport) -> Iterator[Tuple[Feature, Address]]:
    """Run every registered global-scope handler over the report and yield its features."""
    for global_handler in GLOBAL_HANDLER:
        yield from global_handler(report)


GLOBAL_HANDLER = (
    extract_format,
    extract_os,
    extract_arch,
)
|
||||
39
capa/features/extractors/drakvuf/helpers.py
Normal file
39
capa/features/extractors/drakvuf/helpers.py
Normal file
@@ -0,0 +1,39 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
import itertools
|
||||
from typing import Dict, List
|
||||
|
||||
from capa.features.address import ThreadAddress, ProcessAddress
|
||||
from capa.features.extractors.drakvuf.models import Call, DrakvufReport
|
||||
|
||||
|
||||
def index_calls(report: DrakvufReport) -> Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]]:
    """
    Group the report's native and api calls by process and thread, sorting each
    thread's calls by timestamp so they can be addressed by index
    (CallAddress requires a call index).
    """
    result: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]] = {}
    for call in itertools.chain(report.syscalls, report.apicalls):
        if call.pid == 0:
            # DRAKVUF captures api/native calls from all processes running on the system.
            # we ignore the pid 0 since it's a system process and it's unlikely for it to
            # be hijacked or so on, in addition to capa addresses not supporting null pids
            continue
        proc_addr = ProcessAddress(pid=call.pid, ppid=call.ppid)
        thread_addr = ThreadAddress(process=proc_addr, tid=call.tid)
        result.setdefault(proc_addr, {}).setdefault(thread_addr, []).append(call)

    # note: timestamps are strings, so this sort is lexicographic (as in the original)
    for threads in result.values():
        for thread_calls in threads.values():
            thread_calls.sort(key=lambda c: c.timestamp)

    return result
|
||||
137
capa/features/extractors/drakvuf/models.py
Normal file
137
capa/features/extractors/drakvuf/models.py
Normal file
@@ -0,0 +1,137 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
import logging
|
||||
from typing import Any, Dict, List, Iterator
|
||||
|
||||
from pydantic import Field, BaseModel, ConfigDict, model_validator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# field names common to every "syscall"-plugin record; any other key present in a
# raw syscall entry is treated as a call argument (see SystemCall.build_extra below).
REQUIRED_SYSCALL_FIELD_NAMES = {
    "Plugin",
    "TimeStamp",
    "PID",
    "PPID",
    "TID",
    "UserName",
    "UserId",
    "ProcessName",
    "Method",
    "EventUID",
    "Module",
    "vCPU",
    "CR3",
    "Syscall",
    "NArgs",
}
|
||||
|
||||
|
||||
class ConciseModel(BaseModel):
    """Base model that ignores unknown fields in raw DRAKVUF records."""

    # the original code called ConfigDict(extra="ignore") as a bare expression,
    # which constructs the config dict and immediately discards it; pydantic only
    # honors configuration assigned to the `model_config` class attribute.
    model_config = ConfigDict(extra="ignore")
|
||||
|
||||
|
||||
class DiscoveredDLL(ConciseModel):
    # a DLL reported via the apimon plugin's "dll_discovered" event
    # (see DrakvufReport.from_raw_report for the dispatch).
    plugin_name: str = Field(alias="Plugin")
    event: str = Field(alias="Event")
    name: str = Field(alias="DllName")
    pid: int = Field(alias="PID")
|
||||
|
||||
|
||||
class LoadedDLL(ConciseModel):
    # a DLL reported via the apimon plugin's "dll_loaded" event
    # (see DrakvufReport.from_raw_report for the dispatch).
    plugin_name: str = Field(alias="Plugin")
    event: str = Field(alias="Event")
    name: str = Field(alias="DllName")
    # maps imported function name -> address; aliased from the raw "Rva" key
    # (used as an absolute address by extract_import_names — TODO confirm units)
    imports: Dict[str, int] = Field(alias="Rva")
|
||||
|
||||
|
||||
class Call(ConciseModel):
    # base model for a single captured call; fields common to both
    # native syscalls (SystemCall) and Windows API calls (WinApiCall).
    plugin_name: str = Field(alias="Plugin")
    # fractional epoch seconds as a string, e.g. "1716999134.582553"
    timestamp: str = Field(alias="TimeStamp")
    process_name: str = Field(alias="ProcessName")
    ppid: int = Field(alias="PPID")
    pid: int = Field(alias="PID")
    tid: int = Field(alias="TID")
    name: str = Field(alias="Method")
    # argument name -> value; populated by subclass validators
    # (WinApiCall.build_arguments / SystemCall.build_extra)
    arguments: Dict[str, str]
|
||||
|
||||
|
||||
class WinApiCall(Call):
    # This class models Windows API calls captured by DRAKVUF (DLLs, etc.).
    # the raw "Arguments" value is a list of "name=value" strings;
    # build_arguments (below) converts it to a mapping before field validation.
    arguments: Dict[str, str] = Field(alias="Arguments")
    event: str = Field(alias="Event")
    return_value: str = Field(alias="ReturnValue")

    @model_validator(mode="before")
    @classmethod
    def build_arguments(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        # convert ["name=value", ...] into {"name": "value", ...};
        # split on the first "=" only, since values may themselves contain "=".
        # note: an entry without any "=" raises ValueError here.
        args = values["Arguments"]
        values["Arguments"] = dict(arg.split("=", 1) for arg in args)
        return values
|
||||
|
||||
|
||||
class SystemCall(Call):
    # This class models native Windows API calls captured by DRAKVUF.
    # Schema: {
    #   "Plugin": "syscall",
    #   "TimeStamp": "1716999134.582553",
    #   "PID": 3888, "PPID": 2852, "TID": 368, "UserName": "SessionID", "UserId": 2,
    #   "ProcessName": "\\Device\\HarddiskVolume2\\Windows\\explorer.exe",
    #   "Method": "NtSetIoCompletionEx",
    #   "EventUID": "0x27",
    #   "Module": "nt",
    #   "vCPU": 0,
    #   "CR3": "0x119b1002",
    #   "Syscall": 419,
    #   "NArgs": 6,
    #   "IoCompletionHandle": "0xffffffff80001ac0", "IoCompletionReserveHandle": "0xffffffff8000188c",
    #   "KeyContext": "0x0", "ApcContext": "0x2", "IoStatus": "0x7ffb00000000", "IoStatusInformation": "0x0"
    # }
    # The keys up until "NArgs" are common to all the native calls that DRAKVUF reports, with
    # the remaining keys representing the call's specific arguments.
    syscall_number: int = Field(alias="Syscall")
    module: str = Field(alias="Module")
    nargs: int = Field(alias="NArgs")

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        # DRAKVUF stores argument names and values as entries in the syscall's entry.
        # This model validator collects those arguments into a list in the model.
        # note: every key not in REQUIRED_SYSCALL_FIELD_NAMES ends up in `arguments`,
        # so unexpected extra keys in a record would be collected as arguments too.
        values["arguments"] = {
            name: value for name, value in values.items() if name not in REQUIRED_SYSCALL_FIELD_NAMES
        }
        return values
|
||||
|
||||
|
||||
class DrakvufReport(ConciseModel):
    """Parsed DRAKVUF report: call traces plus DLL load/discovery events."""

    syscalls: List[SystemCall] = []
    apicalls: List[WinApiCall] = []
    discovered_dlls: List[DiscoveredDLL] = []
    loaded_dlls: List[LoadedDLL] = []

    @classmethod
    def from_raw_report(cls, entries: Iterator[Dict]) -> "DrakvufReport":
        """Build a report by dispatching raw JSONL records on their Plugin/Event type."""
        report = cls()

        # TODO(yelhamer): add support for more DRAKVUF plugins
        # https://github.com/mandiant/capa/issues/2181
        for record in entries:
            plugin_name = record.get("Plugin")
            if plugin_name == "syscall":
                report.syscalls.append(SystemCall(**record))
            elif plugin_name == "apimon":
                event_name = record.get("Event")
                if event_name == "api_called":
                    report.apicalls.append(WinApiCall(**record))
                elif event_name == "dll_loaded":
                    report.loaded_dlls.append(LoadedDLL(**record))
                elif event_name == "dll_discovered":
                    report.discovered_dlls.append(DiscoveredDLL(**record))

        return report
|
||||
40
capa/features/extractors/drakvuf/process.py
Normal file
40
capa/features/extractors/drakvuf/process.py
Normal file
@@ -0,0 +1,40 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Tuple, Iterator
|
||||
|
||||
from capa.features.common import String, Feature
|
||||
from capa.features.address import Address, ThreadAddress, ProcessAddress
|
||||
from capa.features.extractors.base_extractor import ThreadHandle, ProcessHandle
|
||||
from capa.features.extractors.drakvuf.models import Call
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_threads(
    calls: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]], ph: ProcessHandle
) -> Iterator[ThreadHandle]:
    """
    Get the threads associated with a given process.
    """
    for thread_address in calls[ph.address].keys():
        yield ThreadHandle(address=thread_address, inner={})
|
||||
|
||||
|
||||
def extract_process_name(ph: ProcessHandle) -> Iterator[Tuple[Feature, Address]]:
    """Yield the process's name (stashed in the handle by get_processes) as a String feature."""
    process_name = ph.inner["process_name"]
    yield String(process_name), ph.address
|
||||
|
||||
|
||||
def extract_features(ph: ProcessHandle) -> Iterator[Tuple[Feature, Address]]:
    """Run every registered process-scope handler over the process and yield its features."""
    for handler in PROCESS_HANDLERS:
        yield from handler(ph)


PROCESS_HANDLERS = (extract_process_name,)
|
||||
24
capa/features/extractors/drakvuf/thread.py
Normal file
24
capa/features/extractors/drakvuf/thread.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Iterator
|
||||
|
||||
from capa.features.address import ThreadAddress, ProcessAddress, DynamicCallAddress
|
||||
from capa.features.extractors.base_extractor import CallHandle, ThreadHandle, ProcessHandle
|
||||
from capa.features.extractors.drakvuf.models import Call
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_calls(
    sorted_calls: Dict[ProcessAddress, Dict[ThreadAddress, List[Call]]], ph: ProcessHandle, th: ThreadHandle
) -> Iterator[CallHandle]:
    """Yield a handle per call in this thread, addressed by its index in timestamp order."""
    thread_calls = sorted_calls[ph.address][th.address]
    for index, call in enumerate(thread_calls):
        address = DynamicCallAddress(thread=th.address, id=index)
        yield CallHandle(address=address, inner=call)
|
||||
@@ -50,7 +50,12 @@ def extract_file_export_names(elf: ELFFile, **kwargs):
|
||||
if not isinstance(segment, DynamicSegment):
|
||||
continue
|
||||
|
||||
logger.debug("Dynamic Segment contains %s symbols: ", segment.num_symbols())
|
||||
tab_ptr, tab_offset = segment.get_table_offset("DT_SYMTAB")
|
||||
if tab_ptr is None or tab_offset is None:
|
||||
logger.debug("Dynamic segment doesn't contain DT_SYMTAB")
|
||||
continue
|
||||
|
||||
logger.debug("Dynamic segment contains %s symbols: ", segment.num_symbols())
|
||||
|
||||
for symbol in segment.iter_symbols():
|
||||
# The following conditions are based on the following article
|
||||
@@ -76,6 +81,11 @@ def extract_file_import_names(elf: ELFFile, **kwargs):
|
||||
if not isinstance(segment, DynamicSegment):
|
||||
continue
|
||||
|
||||
tab_ptr, tab_offset = segment.get_table_offset("DT_SYMTAB")
|
||||
if tab_ptr is None or tab_offset is None:
|
||||
logger.debug("Dynamic segment doesn't contain DT_SYMTAB")
|
||||
continue
|
||||
|
||||
for _, symbol in enumerate(segment.iter_symbols()):
|
||||
# The following conditions are based on the following article
|
||||
# http://www.m4b.io/elf/export/binary/analysis/2015/05/25/what-is-an-elf-export.html
|
||||
@@ -100,7 +110,16 @@ def extract_file_import_names(elf: ELFFile, **kwargs):
|
||||
logger.debug("Dynamic Segment contains %s relocation tables:", len(relocation_tables))
|
||||
|
||||
for relocation_table in relocation_tables.values():
|
||||
for relocation in relocation_table.iter_relocations():
|
||||
relocations = []
|
||||
for i in range(relocation_table.num_relocations()):
|
||||
try:
|
||||
relocations.append(relocation_table.get_relocation(i))
|
||||
except TypeError:
|
||||
# ELF is corrupt and the relocation table is invalid,
|
||||
# so stop processing it.
|
||||
break
|
||||
|
||||
for relocation in relocations:
|
||||
# Extract the symbol name from the symbol table using the symbol index in the relocation
|
||||
if relocation["r_info_sym"] not in symbol_names:
|
||||
continue
|
||||
|
||||
@@ -5,9 +5,11 @@
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
from typing import Any, Dict, Tuple, Iterator
|
||||
import re
|
||||
from typing import Any, Dict, Tuple, Iterator, Optional
|
||||
|
||||
import idc
|
||||
import ida_ua
|
||||
import idaapi
|
||||
import idautils
|
||||
|
||||
@@ -35,9 +37,9 @@ def get_externs(ctx: Dict[str, Any]) -> Dict[int, Any]:
|
||||
return ctx["externs_cache"]
|
||||
|
||||
|
||||
def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Iterator[Any]:
|
||||
def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Optional[Tuple[str, str]]:
|
||||
"""check instruction for API call"""
|
||||
info = ()
|
||||
info = None
|
||||
ref = insn.ea
|
||||
|
||||
# attempt to resolve API calls by following chained thunks to a reasonable depth
|
||||
@@ -52,7 +54,7 @@ def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Iterator[A
|
||||
except IndexError:
|
||||
break
|
||||
|
||||
info = funcs.get(ref, ())
|
||||
info = funcs.get(ref)
|
||||
if info:
|
||||
break
|
||||
|
||||
@@ -60,8 +62,7 @@ def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Iterator[A
|
||||
if not f or not (f.flags & idaapi.FUNC_THUNK):
|
||||
break
|
||||
|
||||
if info:
|
||||
yield info
|
||||
return info
|
||||
|
||||
|
||||
def extract_insn_api_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
|
||||
@@ -76,16 +77,39 @@ def extract_insn_api_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle)
|
||||
if insn.get_canon_mnem() not in ("call", "jmp"):
|
||||
return
|
||||
|
||||
# check calls to imported functions
|
||||
for api in check_for_api_call(insn, get_imports(fh.ctx)):
|
||||
# check call to imported functions
|
||||
api = check_for_api_call(insn, get_imports(fh.ctx))
|
||||
if api:
|
||||
# tuple (<module>, <function>, <ordinal>)
|
||||
for name in capa.features.extractors.helpers.generate_symbols(api[0], api[1]):
|
||||
yield API(name), ih.address
|
||||
# a call instruction should only call one function, stop if a call to an import is extracted
|
||||
return
|
||||
|
||||
# check calls to extern functions
|
||||
for api in check_for_api_call(insn, get_externs(fh.ctx)):
|
||||
# check call to extern functions
|
||||
api = check_for_api_call(insn, get_externs(fh.ctx))
|
||||
if api:
|
||||
# tuple (<module>, <function>, <ordinal>)
|
||||
yield API(api[1]), ih.address
|
||||
# a call instruction should only call one function, stop if a call to an extern is extracted
|
||||
return
|
||||
|
||||
# extract dynamically resolved APIs stored in renamed globals (renamed for example using `renimp.idc`)
|
||||
# examples: `CreateProcessA`, `HttpSendRequestA`
|
||||
if insn.Op1.type == ida_ua.o_mem:
|
||||
op_addr = insn.Op1.addr
|
||||
op_name = idaapi.get_name(op_addr)
|
||||
# when renaming a global using an API name, IDA assigns it the function type
|
||||
# ensure we do not extract something wrong by checking that the address has a name and a type
|
||||
# we could check that the type is a function definition, but that complicates the code
|
||||
if (not op_name.startswith("off_")) and idc.get_type(op_addr):
|
||||
# Remove suffix used in repeated names, for example _0 in VirtualFree_0
|
||||
match = re.match(r"(.+)_\d+", op_name)
|
||||
if match:
|
||||
op_name = match.group(1)
|
||||
# the global name does not include the DLL name, so we can't extract it
|
||||
for name in capa.features.extractors.helpers.generate_symbols("", op_name):
|
||||
yield API(name), ih.address
|
||||
|
||||
# extract IDA/FLIRT recognized API functions
|
||||
targets = tuple(idautils.CodeRefsFrom(insn.ea, False))
|
||||
|
||||
@@ -7,15 +7,15 @@
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
import sys
|
||||
import gzip
|
||||
import json
|
||||
import inspect
|
||||
import logging
|
||||
import contextlib
|
||||
import importlib.util
|
||||
from typing import NoReturn
|
||||
from typing import Dict, Union, BinaryIO, Iterator, NoReturn
|
||||
from pathlib import Path
|
||||
|
||||
import tqdm
|
||||
import msgspec.json
|
||||
|
||||
from capa.exceptions import UnsupportedFormatError
|
||||
from capa.features.common import (
|
||||
@@ -25,13 +25,16 @@ from capa.features.common import (
|
||||
FORMAT_SC64,
|
||||
FORMAT_DOTNET,
|
||||
FORMAT_FREEZE,
|
||||
FORMAT_DRAKVUF,
|
||||
FORMAT_UNKNOWN,
|
||||
Format,
|
||||
)
|
||||
|
||||
EXTENSIONS_SHELLCODE_32 = ("sc32", "raw32")
|
||||
EXTENSIONS_SHELLCODE_64 = ("sc64", "raw64")
|
||||
EXTENSIONS_DYNAMIC = ("json", "json_", "json.gz")
|
||||
# CAPE extensions: .json, .json_, .json.gz
|
||||
# DRAKVUF Sandbox extensions: .log, .log.gz
|
||||
EXTENSIONS_DYNAMIC = ("json", "json_", "json.gz", "log", ".log.gz")
|
||||
EXTENSIONS_ELF = "elf_"
|
||||
EXTENSIONS_FREEZE = "frz"
|
||||
|
||||
@@ -76,13 +79,52 @@ def load_json_from_path(json_path: Path):
|
||||
try:
|
||||
report_json = compressed_report.read()
|
||||
except gzip.BadGzipFile:
|
||||
report = json.load(json_path.open(encoding="utf-8"))
|
||||
report = msgspec.json.decode(json_path.read_text(encoding="utf-8"))
|
||||
else:
|
||||
report = json.loads(report_json)
|
||||
report = msgspec.json.decode(report_json)
|
||||
return report
|
||||
|
||||
|
||||
def decode_json_lines(fd: Union[BinaryIO, gzip.GzipFile]) -> Iterator[Dict]:
    """Decode a JSONL byte stream one line at a time, skipping lines that fail to decode."""
    for line in fd:
        try:
            line_s = line.strip().decode()
            obj = msgspec.json.decode(line_s)
            yield obj
        except (msgspec.DecodeError, UnicodeDecodeError):
            # sometimes DRAKVUF reports bad method names and/or malformed JSON
            logger.debug("bad DRAKVUF log line: %s", line)
|
||||
|
||||
|
||||
def load_jsonl_from_path(jsonl_path: Path) -> Iterator[Dict]:
    """Yield decoded JSON objects from a JSONL file, transparently handling gzip compression."""
    try:
        # gzip doesn't fail until the first read; BadGzipFile surfaces
        # here via the yielding generator before any line is produced.
        with gzip.open(jsonl_path, "rb") as fg:
            yield from decode_json_lines(fg)
    except gzip.BadGzipFile:
        # not gzip-compressed: re-open and read as plain JSONL
        with jsonl_path.open(mode="rb") as f:
            yield from decode_json_lines(f)
|
||||
|
||||
|
||||
def load_one_jsonl_from_path(jsonl_path: Path):
    """
    Load and decode only the first JSON line of the file, to avoid the overhead
    of parsing the entire report; handles both plain and gzip-compressed JSONL.
    """
    try:
        with gzip.open(jsonl_path, "rb") as f:
            line = next(iter(f))
    except gzip.BadGzipFile:
        # not gzip-compressed: fall back to reading the file as plain bytes
        with jsonl_path.open(mode="rb") as f:
            line = next(iter(f))
    # decode after the try/except instead of in a `finally:` block — the original
    # decoded inside `finally`, which also runs when an unexpected exception leaves
    # `line` unbound, raising a NameError that masks the real error.
    return msgspec.json.decode(line.decode(errors="ignore"))
|
||||
|
||||
|
||||
def get_format_from_report(sample: Path) -> str:
|
||||
if sample.name.endswith((".log", "log.gz")):
|
||||
line = load_one_jsonl_from_path(sample)
|
||||
if "Plugin" in line:
|
||||
return FORMAT_DRAKVUF
|
||||
return FORMAT_UNKNOWN
|
||||
|
||||
report = load_json_from_path(sample)
|
||||
if "CAPE" in report:
|
||||
return FORMAT_CAPE
|
||||
@@ -189,9 +231,20 @@ def log_unsupported_cape_report_error(error: str):
|
||||
logger.error("-" * 80)
|
||||
|
||||
|
||||
def log_empty_cape_report_error(error: str):
|
||||
def log_unsupported_drakvuf_report_error(error: str):
|
||||
logger.error("-" * 80)
|
||||
logger.error(" CAPE report is empty or only contains little useful data: %s", error)
|
||||
logger.error(" Input file is not a valid DRAKVUF output file: %s", error)
|
||||
logger.error(" ")
|
||||
logger.error(" capa currently only supports analyzing standard DRAKVUF outputs in JSONL format.")
|
||||
logger.error(
|
||||
" Please make sure your report file is in the standard format and contains both the static and dynamic sections."
|
||||
)
|
||||
logger.error("-" * 80)
|
||||
|
||||
|
||||
def log_empty_sandbox_report_error(error: str, sandbox_name: str):
|
||||
logger.error("-" * 80)
|
||||
logger.error(" %s report is empty or only contains little useful data: %s", sandbox_name, error)
|
||||
logger.error(" ")
|
||||
logger.error(" Please make sure the sandbox run captures useful behaviour of your sample.")
|
||||
logger.error("-" * 80)
|
||||
|
||||
@@ -81,6 +81,7 @@ can update using the `Settings` button.
|
||||
* Double-click the `Address` column to navigate your Disassembly view to the address of the associated feature
|
||||
* Double-click a result in the `Rule Information` column to expand its children
|
||||
* Select a checkbox in the `Rule Information` column to highlight the address of the associated feature in your Disassembly view
|
||||
* Reanalyze if you renamed global variables that store dynamically resolved APIs. capa will use these to improve its analysis.
|
||||
|
||||
#### Tips for Rule Generator
|
||||
|
||||
|
||||
@@ -45,6 +45,7 @@ from capa.features.common import (
|
||||
FORMAT_SC32,
|
||||
FORMAT_SC64,
|
||||
FORMAT_DOTNET,
|
||||
FORMAT_DRAKVUF,
|
||||
)
|
||||
from capa.features.address import Address
|
||||
from capa.features.extractors.base_extractor import (
|
||||
@@ -61,9 +62,14 @@ BACKEND_DOTNET = "dotnet"
|
||||
BACKEND_BINJA = "binja"
|
||||
BACKEND_PEFILE = "pefile"
|
||||
BACKEND_CAPE = "cape"
|
||||
BACKEND_DRAKVUF = "drakvuf"
|
||||
BACKEND_FREEZE = "freeze"
|
||||
|
||||
|
||||
class CorruptFile(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
def is_supported_format(sample: Path) -> bool:
|
||||
"""
|
||||
Return if this is a supported file based on magic header values
|
||||
@@ -137,21 +143,28 @@ def get_workspace(path: Path, input_format: str, sigpaths: List[Path]):
|
||||
import viv_utils.flirt
|
||||
|
||||
logger.debug("generating vivisect workspace for: %s", path)
|
||||
if input_format == FORMAT_AUTO:
|
||||
if not is_supported_format(path):
|
||||
raise UnsupportedFormatError()
|
||||
|
||||
# don't analyze, so that we can add our Flirt function analyzer first.
|
||||
vw = viv_utils.getWorkspace(str(path), analyze=False, should_save=False)
|
||||
elif input_format in {FORMAT_PE, FORMAT_ELF}:
|
||||
vw = viv_utils.getWorkspace(str(path), analyze=False, should_save=False)
|
||||
elif input_format == FORMAT_SC32:
|
||||
# these are not analyzed nor saved.
|
||||
vw = viv_utils.getShellcodeWorkspaceFromFile(str(path), arch="i386", analyze=False)
|
||||
elif input_format == FORMAT_SC64:
|
||||
vw = viv_utils.getShellcodeWorkspaceFromFile(str(path), arch="amd64", analyze=False)
|
||||
else:
|
||||
raise ValueError("unexpected format: " + input_format)
|
||||
try:
|
||||
if input_format == FORMAT_AUTO:
|
||||
if not is_supported_format(path):
|
||||
raise UnsupportedFormatError()
|
||||
|
||||
# don't analyze, so that we can add our Flirt function analyzer first.
|
||||
vw = viv_utils.getWorkspace(str(path), analyze=False, should_save=False)
|
||||
elif input_format in {FORMAT_PE, FORMAT_ELF}:
|
||||
vw = viv_utils.getWorkspace(str(path), analyze=False, should_save=False)
|
||||
elif input_format == FORMAT_SC32:
|
||||
# these are not analyzed nor saved.
|
||||
vw = viv_utils.getShellcodeWorkspaceFromFile(str(path), arch="i386", analyze=False)
|
||||
elif input_format == FORMAT_SC64:
|
||||
vw = viv_utils.getShellcodeWorkspaceFromFile(str(path), arch="amd64", analyze=False)
|
||||
else:
|
||||
raise ValueError("unexpected format: " + input_format)
|
||||
except Exception as e:
|
||||
# vivisect raises raw Exception instances, and we don't want
|
||||
# to do a subclass check via isinstance.
|
||||
if type(e) is Exception and "Couldn't convert rva" in e.args[0]:
|
||||
raise CorruptFile(e.args[0]) from e
|
||||
|
||||
viv_utils.flirt.register_flirt_signature_analyzers(vw, [str(s) for s in sigpaths])
|
||||
|
||||
@@ -199,6 +212,12 @@ def get_extractor(
|
||||
report = capa.helpers.load_json_from_path(input_path)
|
||||
return capa.features.extractors.cape.extractor.CapeExtractor.from_report(report)
|
||||
|
||||
elif backend == BACKEND_DRAKVUF:
|
||||
import capa.features.extractors.drakvuf.extractor
|
||||
|
||||
report = capa.helpers.load_jsonl_from_path(input_path)
|
||||
return capa.features.extractors.drakvuf.extractor.DrakvufExtractor.from_report(report)
|
||||
|
||||
elif backend == BACKEND_DOTNET:
|
||||
import capa.features.extractors.dnfile.extractor
|
||||
|
||||
@@ -316,6 +335,13 @@ def get_file_extractors(input_file: Path, input_format: str) -> List[FeatureExtr
|
||||
report = capa.helpers.load_json_from_path(input_file)
|
||||
file_extractors.append(capa.features.extractors.cape.extractor.CapeExtractor.from_report(report))
|
||||
|
||||
elif input_format == FORMAT_DRAKVUF:
|
||||
import capa.helpers
|
||||
import capa.features.extractors.drakvuf.extractor
|
||||
|
||||
report = capa.helpers.load_jsonl_from_path(input_file)
|
||||
file_extractors.append(capa.features.extractors.drakvuf.extractor.DrakvufExtractor.from_report(report))
|
||||
|
||||
return file_extractors
|
||||
|
||||
|
||||
|
||||
120
capa/main.py
120
capa/main.py
@@ -17,7 +17,7 @@ import argparse
|
||||
import textwrap
|
||||
import contextlib
|
||||
from types import TracebackType
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any, Set, Dict, List, Optional, TypedDict
|
||||
from pathlib import Path
|
||||
|
||||
import colorama
|
||||
@@ -42,17 +42,27 @@ import capa.render.result_document as rdoc
|
||||
import capa.features.extractors.common
|
||||
from capa.rules import RuleSet
|
||||
from capa.engine import MatchResults
|
||||
from capa.loader import BACKEND_VIV, BACKEND_CAPE, BACKEND_BINJA, BACKEND_DOTNET, BACKEND_FREEZE, BACKEND_PEFILE
|
||||
from capa.loader import (
|
||||
BACKEND_VIV,
|
||||
BACKEND_CAPE,
|
||||
BACKEND_BINJA,
|
||||
BACKEND_DOTNET,
|
||||
BACKEND_FREEZE,
|
||||
BACKEND_PEFILE,
|
||||
BACKEND_DRAKVUF,
|
||||
)
|
||||
from capa.helpers import (
|
||||
get_file_taste,
|
||||
get_auto_format,
|
||||
log_unsupported_os_error,
|
||||
log_unsupported_arch_error,
|
||||
log_empty_cape_report_error,
|
||||
log_unsupported_format_error,
|
||||
log_empty_sandbox_report_error,
|
||||
log_unsupported_cape_report_error,
|
||||
log_unsupported_drakvuf_report_error,
|
||||
)
|
||||
from capa.exceptions import (
|
||||
InvalidArgument,
|
||||
EmptyReportError,
|
||||
UnsupportedOSError,
|
||||
UnsupportedArchError,
|
||||
@@ -73,9 +83,18 @@ from capa.features.common import (
|
||||
FORMAT_DOTNET,
|
||||
FORMAT_FREEZE,
|
||||
FORMAT_RESULT,
|
||||
FORMAT_DRAKVUF,
|
||||
STATIC_FORMATS,
|
||||
DYNAMIC_FORMATS,
|
||||
)
|
||||
from capa.capabilities.common import find_capabilities, has_file_limitation, find_file_capabilities
|
||||
from capa.features.extractors.base_extractor import FeatureExtractor, StaticFeatureExtractor, DynamicFeatureExtractor
|
||||
from capa.features.extractors.base_extractor import (
|
||||
ProcessFilter,
|
||||
FunctionFilter,
|
||||
FeatureExtractor,
|
||||
StaticFeatureExtractor,
|
||||
DynamicFeatureExtractor,
|
||||
)
|
||||
|
||||
RULES_PATH_DEFAULT_STRING = "(embedded rules)"
|
||||
SIGNATURES_PATH_DEFAULT_STRING = "(embedded signatures)"
|
||||
@@ -96,10 +115,17 @@ E_MISSING_CAPE_STATIC_ANALYSIS = 21
|
||||
E_MISSING_CAPE_DYNAMIC_ANALYSIS = 22
|
||||
E_EMPTY_REPORT = 23
|
||||
E_UNSUPPORTED_GHIDRA_EXECUTION_MODE = 24
|
||||
E_INVALID_INPUT_FORMAT = 25
|
||||
E_INVALID_FEATURE_EXTRACTOR = 26
|
||||
|
||||
logger = logging.getLogger("capa")
|
||||
|
||||
|
||||
class FilterConfig(TypedDict, total=False):
|
||||
processes: Set[int]
|
||||
functions: Set[int]
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def timing(msg: str):
|
||||
t0 = time.time()
|
||||
@@ -232,6 +258,7 @@ def install_common_args(parser, wanted=None):
|
||||
(FORMAT_SC32, "32-bit shellcode"),
|
||||
(FORMAT_SC64, "64-bit shellcode"),
|
||||
(FORMAT_CAPE, "CAPE sandbox report"),
|
||||
(FORMAT_DRAKVUF, "DRAKVUF sandbox report"),
|
||||
(FORMAT_FREEZE, "features previously frozen by capa"),
|
||||
]
|
||||
format_help = ", ".join([f"{f[0]}: {f[1]}" for f in formats])
|
||||
@@ -253,6 +280,7 @@ def install_common_args(parser, wanted=None):
|
||||
(BACKEND_DOTNET, ".NET"),
|
||||
(BACKEND_FREEZE, "capa freeze"),
|
||||
(BACKEND_CAPE, "CAPE"),
|
||||
(BACKEND_DRAKVUF, "DRAKVUF"),
|
||||
]
|
||||
backend_help = ", ".join([f"{f[0]}: {f[1]}" for f in backends])
|
||||
parser.add_argument(
|
||||
@@ -264,6 +292,22 @@ def install_common_args(parser, wanted=None):
|
||||
help=f"select backend, {backend_help}",
|
||||
)
|
||||
|
||||
if "restrict-to-functions" in wanted:
|
||||
parser.add_argument(
|
||||
"--restrict-to-functions",
|
||||
type=lambda s: s.replace(" ", "").split(","),
|
||||
default=[],
|
||||
help="provide a list of comma-separated function virtual addresses to analyze (static analysis).",
|
||||
)
|
||||
|
||||
if "restrict-to-processes" in wanted:
|
||||
parser.add_argument(
|
||||
"--restrict-to-processes",
|
||||
type=lambda s: s.replace(" ", "").split(","),
|
||||
default=[],
|
||||
help="provide a list of comma-separated process IDs to analyze (dynamic analysis).",
|
||||
)
|
||||
|
||||
if "os" in wanted:
|
||||
oses = [
|
||||
(OS_AUTO, "detect OS automatically - default"),
|
||||
@@ -505,6 +549,9 @@ def get_backend_from_cli(args, input_format: str) -> str:
|
||||
if input_format == FORMAT_CAPE:
|
||||
return BACKEND_CAPE
|
||||
|
||||
if input_format == FORMAT_DRAKVUF:
|
||||
return BACKEND_DRAKVUF
|
||||
|
||||
elif input_format == FORMAT_DOTNET:
|
||||
return BACKEND_DOTNET
|
||||
|
||||
@@ -529,7 +576,7 @@ def get_sample_path_from_cli(args, backend: str) -> Optional[Path]:
|
||||
raises:
|
||||
ShouldExitError: if the program is invoked incorrectly and should exit.
|
||||
"""
|
||||
if backend == BACKEND_CAPE:
|
||||
if backend in (BACKEND_CAPE, BACKEND_DRAKVUF):
|
||||
return None
|
||||
else:
|
||||
return args.input_file
|
||||
@@ -632,12 +679,17 @@ def get_file_extractors_from_cli(args, input_format: str) -> List[FeatureExtract
|
||||
except UnsupportedFormatError as e:
|
||||
if input_format == FORMAT_CAPE:
|
||||
log_unsupported_cape_report_error(str(e))
|
||||
elif input_format == FORMAT_DRAKVUF:
|
||||
log_unsupported_drakvuf_report_error(str(e))
|
||||
else:
|
||||
log_unsupported_format_error()
|
||||
raise ShouldExitError(E_INVALID_FILE_TYPE) from e
|
||||
except EmptyReportError as e:
|
||||
if input_format == FORMAT_CAPE:
|
||||
log_empty_cape_report_error(str(e))
|
||||
log_empty_sandbox_report_error(str(e), sandbox_name="CAPE")
|
||||
raise ShouldExitError(E_EMPTY_REPORT) from e
|
||||
elif input_format == FORMAT_DRAKVUF:
|
||||
log_empty_sandbox_report_error(str(e), sandbox_name="DRAKVUF")
|
||||
raise ShouldExitError(E_EMPTY_REPORT) from e
|
||||
else:
|
||||
log_unsupported_format_error()
|
||||
@@ -729,9 +781,10 @@ def get_extractor_from_cli(args, input_format: str, backend: str) -> FeatureExtr
|
||||
|
||||
os_ = get_os_from_cli(args, backend)
|
||||
sample_path = get_sample_path_from_cli(args, backend)
|
||||
extractor_filters = get_extractor_filters_from_cli(args, input_format)
|
||||
|
||||
try:
|
||||
return capa.loader.get_extractor(
|
||||
extractor = capa.loader.get_extractor(
|
||||
args.input_file,
|
||||
input_format,
|
||||
os_,
|
||||
@@ -741,9 +794,12 @@ def get_extractor_from_cli(args, input_format: str, backend: str) -> FeatureExtr
|
||||
disable_progress=args.quiet or args.debug,
|
||||
sample_path=sample_path,
|
||||
)
|
||||
return apply_extractor_filters(extractor, extractor_filters)
|
||||
except UnsupportedFormatError as e:
|
||||
if input_format == FORMAT_CAPE:
|
||||
log_unsupported_cape_report_error(str(e))
|
||||
elif input_format == FORMAT_DRAKVUF:
|
||||
log_unsupported_drakvuf_report_error(str(e))
|
||||
else:
|
||||
log_unsupported_format_error()
|
||||
raise ShouldExitError(E_INVALID_FILE_TYPE) from e
|
||||
@@ -753,6 +809,41 @@ def get_extractor_from_cli(args, input_format: str, backend: str) -> FeatureExtr
|
||||
except UnsupportedOSError as e:
|
||||
log_unsupported_os_error()
|
||||
raise ShouldExitError(E_INVALID_FILE_OS) from e
|
||||
except capa.loader.CorruptFile as e:
|
||||
logger.error("Input file '%s' is not a valid file: %s", args.input_file, str(e))
|
||||
raise ShouldExitError(E_CORRUPT_FILE) from e
|
||||
|
||||
|
||||
def get_extractor_filters_from_cli(args, input_format) -> FilterConfig:
|
||||
if not hasattr(args, "restrict_to_processes") and not hasattr(args, "restrict_to_functions"):
|
||||
# no processes or function filters were installed in the args
|
||||
return {}
|
||||
|
||||
if input_format in STATIC_FORMATS:
|
||||
if args.restrict_to_processes:
|
||||
raise InvalidArgument("Cannot filter processes with static analysis.")
|
||||
return {"functions": {int(addr, 0) for addr in args.restrict_to_functions}}
|
||||
elif input_format in DYNAMIC_FORMATS:
|
||||
if args.restrict_to_functions:
|
||||
raise InvalidArgument("Cannot filter functions with dynamic analysis.")
|
||||
return {"processes": {int(pid, 0) for pid in args.restrict_to_processes}}
|
||||
else:
|
||||
raise ShouldExitError(E_INVALID_INPUT_FORMAT)
|
||||
|
||||
|
||||
def apply_extractor_filters(extractor: FeatureExtractor, extractor_filters: FilterConfig):
|
||||
if not any(extractor_filters.values()):
|
||||
return extractor
|
||||
|
||||
# if the user specified extractor filters, then apply them here
|
||||
if isinstance(extractor, StaticFeatureExtractor):
|
||||
assert extractor_filters["functions"]
|
||||
return FunctionFilter(extractor, extractor_filters["functions"])
|
||||
elif isinstance(extractor, DynamicFeatureExtractor):
|
||||
assert extractor_filters["processes"]
|
||||
return ProcessFilter(extractor, extractor_filters["processes"])
|
||||
else:
|
||||
raise ShouldExitError(E_INVALID_FEATURE_EXTRACTOR)
|
||||
|
||||
|
||||
def main(argv: Optional[List[str]] = None):
|
||||
@@ -794,7 +885,20 @@ def main(argv: Optional[List[str]] = None):
|
||||
parser = argparse.ArgumentParser(
|
||||
description=desc, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter
|
||||
)
|
||||
install_common_args(parser, {"input_file", "format", "backend", "os", "signatures", "rules", "tag"})
|
||||
install_common_args(
|
||||
parser,
|
||||
{
|
||||
"input_file",
|
||||
"format",
|
||||
"backend",
|
||||
"os",
|
||||
"signatures",
|
||||
"rules",
|
||||
"tag",
|
||||
"restrict-to-functions",
|
||||
"restrict-to-processes",
|
||||
},
|
||||
)
|
||||
parser.add_argument("-j", "--json", action="store_true", help="emit JSON instead of text")
|
||||
args = parser.parse_args(args=argv)
|
||||
|
||||
|
||||
@@ -11,7 +11,6 @@ import os
|
||||
import re
|
||||
import copy
|
||||
import uuid
|
||||
import codecs
|
||||
import logging
|
||||
import binascii
|
||||
import collections
|
||||
@@ -456,7 +455,7 @@ DESCRIPTION_SEPARATOR = " = "
|
||||
|
||||
def parse_bytes(s: str) -> bytes:
|
||||
try:
|
||||
b = codecs.decode(s.replace(" ", "").encode("ascii"), "hex")
|
||||
b = bytes.fromhex(s.replace(" ", ""))
|
||||
except binascii.Error:
|
||||
raise InvalidRule(f'unexpected bytes value: must be a valid hex sequence: "{s}"')
|
||||
|
||||
@@ -1918,7 +1917,6 @@ class RuleSet:
|
||||
# This strategy is described here:
|
||||
# https://github.com/mandiant/capa/issues/2129
|
||||
if feature_index.string_rules:
|
||||
|
||||
# This is a FeatureSet that contains only String features.
|
||||
# Since we'll only be evaluating String/Regex features below, we don't care about
|
||||
# other sorts of features (Mnemonic, Number, etc.) and therefore can save some time
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
__version__ = "7.1.0"
|
||||
__version__ = "7.2.0"
|
||||
|
||||
|
||||
def get_major_version():
|
||||
|
||||
BIN
doc/img/capa_web_explorer.png
Normal file
BIN
doc/img/capa_web_explorer.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 603 KiB |
18
doc/usage.md
18
doc/usage.md
@@ -9,6 +9,22 @@ Use the `-t` option to run rules with the given metadata value (see the rule fie
|
||||
For example, `capa -t william.ballenthin@mandiant.com` runs rules that reference Willi's email address (probably as the author), or
|
||||
`capa -t communication` runs rules with the namespace `communication`.
|
||||
|
||||
### only analyze selected functions
|
||||
Use the `--restrict-to-functions` option to extract capabilities from only a selected set of functions. This is useful for analyzing
|
||||
large functions and figuring out their capabilities and their address of occurance; for example: PEB access, RC4 encryption, etc.
|
||||
|
||||
To use this, you can copy the virtual addresses from your favorite disassembler and pass them to capa as follows:
|
||||
`capa sample.exe --restrict-to-functions 0x4019C0,0x401CD0`. If you add the `-v` option then capa will extract the interesting parts of a function for you.
|
||||
|
||||
### only analyze selected processes
|
||||
Use the `--restrict-to-processes` option to extract capabilities from only a selected set of processes. This is useful for filtering the noise
|
||||
generated from analyzing non-malicious processes that can be reported by some sandboxes, as well as reduce the execution time
|
||||
by not analyzing such processes in the first place.
|
||||
|
||||
To use this, you can pick the PIDs of the processes you are interested in from the sandbox-generated process tree (or from the sandbox-reported malware PID)
|
||||
and pass that to capa as follows: `capa report.log --restrict-to-processes 3888,3214,4299`. If you add the `-v` option then capa will tell you
|
||||
which threads perform what actions (encrypt/decrypt data, initiate a connection, etc.).
|
||||
|
||||
### IDA Pro plugin: capa explorer
|
||||
Please check out the [capa explorer documentation](/capa/ida/plugin/README.md).
|
||||
|
||||
@@ -16,4 +32,4 @@ Please check out the [capa explorer documentation](/capa/ida/plugin/README.md).
|
||||
Set the environment variable `CAPA_SAVE_WORKSPACE` to instruct the underlying analysis engine to
|
||||
cache its intermediate results to the file system. For example, vivisect will create `.viv` files.
|
||||
Subsequently, capa may run faster when reprocessing the same input file.
|
||||
This is particularly useful during rule development as you repeatedly test a rule against a known sample.
|
||||
This is particularly useful during rule development as you repeatedly test a rule against a known sample.
|
||||
|
||||
@@ -79,6 +79,7 @@ dependencies = [
|
||||
"rich>=13",
|
||||
"humanize>=4",
|
||||
"protobuf>=5",
|
||||
"msgspec>=0.18.6",
|
||||
|
||||
# ---------------------------------------
|
||||
# Dependencies that we develop
|
||||
@@ -123,10 +124,10 @@ dev = [
|
||||
"pytest-sugar==1.0.0",
|
||||
"pytest-instafail==0.5.0",
|
||||
"pytest-cov==5.0.0",
|
||||
"flake8==7.0.0",
|
||||
"flake8==7.1.0",
|
||||
"flake8-bugbear==24.4.26",
|
||||
"flake8-encodings==0.5.1",
|
||||
"flake8-comprehensions==3.14.0",
|
||||
"flake8-comprehensions==3.15.0",
|
||||
"flake8-logging-format==0.9.0",
|
||||
"flake8-no-implicit-concat==0.3.5",
|
||||
"flake8-print==5.0.0",
|
||||
@@ -134,10 +135,10 @@ dev = [
|
||||
"flake8-simplify==0.21.0",
|
||||
"flake8-use-pathlib==0.3.0",
|
||||
"flake8-copyright==0.2.4",
|
||||
"ruff==0.4.8",
|
||||
"black==24.4.2",
|
||||
"ruff==0.5.6",
|
||||
"black==24.8.0",
|
||||
"isort==5.13.2",
|
||||
"mypy==1.10.0",
|
||||
"mypy==1.11.1",
|
||||
"mypy-protobuf==3.6.0",
|
||||
"PyGithub==2.3.0",
|
||||
# type stubs for mypy
|
||||
@@ -146,26 +147,26 @@ dev = [
|
||||
"types-PyYAML==6.0.8",
|
||||
"types-tabulate==0.9.0.20240106",
|
||||
"types-termcolor==1.1.4",
|
||||
"types-psutil==5.8.23",
|
||||
"types_requests==2.32.0.20240602",
|
||||
"types-protobuf==5.26.0.20240422",
|
||||
"deptry==0.16.1"
|
||||
"types-psutil==6.0.0.20240621",
|
||||
"types_requests==2.32.0.20240712",
|
||||
"types-protobuf==5.27.0.20240626",
|
||||
"deptry==0.17.0"
|
||||
]
|
||||
build = [
|
||||
# Dev and build dependencies are not relaxed because
|
||||
# we want all developer environments to be consistent.
|
||||
# These dependencies are not used in production environments
|
||||
# and should not conflict with other libraries/tooling.
|
||||
"pyinstaller==6.8.0",
|
||||
"pyinstaller==6.10.0",
|
||||
"setuptools==70.0.0",
|
||||
"build==1.2.1"
|
||||
]
|
||||
scripts = [
|
||||
"jschema_to_python==1.2.3",
|
||||
"psutil==5.9.2",
|
||||
"psutil==6.0.0",
|
||||
"stix2==3.0.1",
|
||||
"sarif_om==1.0.4",
|
||||
"requests==2.31.0",
|
||||
"requests==2.32.3",
|
||||
]
|
||||
|
||||
[tool.deptry]
|
||||
@@ -187,6 +188,7 @@ known_first_party = [
|
||||
"ida_loader",
|
||||
"ida_nalt",
|
||||
"ida_segment",
|
||||
"ida_ua",
|
||||
"idaapi",
|
||||
"idautils",
|
||||
"idc",
|
||||
|
||||
@@ -21,8 +21,8 @@ mdurl==0.1.2
|
||||
msgpack==1.0.8
|
||||
networkx==3.1
|
||||
pefile==2023.2.7
|
||||
pip==24.0
|
||||
protobuf==5.27.1
|
||||
pip==24.2
|
||||
protobuf==5.27.3
|
||||
pyasn1==0.4.8
|
||||
pyasn1-modules==0.2.8
|
||||
pycparser==2.22
|
||||
@@ -40,7 +40,7 @@ six==1.16.0
|
||||
sortedcontainers==2.4.0
|
||||
tabulate==0.9.0
|
||||
termcolor==2.4.0
|
||||
tqdm==4.66.4
|
||||
tqdm==4.66.5
|
||||
viv-utils==0.7.11
|
||||
vivisect==1.1.1
|
||||
wcwidth==0.2.13
|
||||
|
||||
2
rules
2
rules
Submodule rules updated: e63c454fbb...5b8c8a63a2
@@ -69,7 +69,8 @@ def load_analysis(bv):
|
||||
return 0
|
||||
binaryninja.log_info(f"Using capa file {path}")
|
||||
|
||||
doc = json.loads(path.read_bytes().decode("utf-8"))
|
||||
with Path(path).open("r", encoding="utf-8") as file:
|
||||
doc = json.load(file)
|
||||
|
||||
if "meta" not in doc or "rules" not in doc:
|
||||
binaryninja.log_error("doesn't appear to be a capa report")
|
||||
@@ -83,20 +84,35 @@ def load_analysis(bv):
|
||||
binaryninja.log_error("sample mismatch")
|
||||
return -2
|
||||
|
||||
# Retreive base address
|
||||
capa_base_address = 0
|
||||
if "analysis" in doc["meta"] and "base_address" in doc["meta"]["analysis"]:
|
||||
if doc["meta"]["analysis"]["base_address"]["type"] == "absolute":
|
||||
capa_base_address = int(doc["meta"]["analysis"]["base_address"]["value"])
|
||||
|
||||
rows = []
|
||||
for rule in doc["rules"].values():
|
||||
if rule["meta"].get("lib"):
|
||||
continue
|
||||
if rule["meta"].get("capa/subscope"):
|
||||
continue
|
||||
if rule["meta"]["scope"] != "function":
|
||||
if rule["meta"]["scopes"].get("static") != "function":
|
||||
continue
|
||||
|
||||
name = rule["meta"]["name"]
|
||||
ns = rule["meta"].get("namespace", "")
|
||||
for va in rule["matches"].keys():
|
||||
va = int(va)
|
||||
rows.append((ns, name, va))
|
||||
for matches in rule["matches"]:
|
||||
for match in matches:
|
||||
if "type" not in match.keys():
|
||||
continue
|
||||
if "value" not in match.keys():
|
||||
continue
|
||||
va = match["value"]
|
||||
# Substract va and CAPA base_address
|
||||
va = int(va) - capa_base_address
|
||||
# Add binja base address
|
||||
va = va + bv.start
|
||||
rows.append((ns, name, va))
|
||||
|
||||
# order by (namespace, name) so that like things show up together
|
||||
rows = sorted(rows)
|
||||
|
||||
@@ -125,7 +125,7 @@ def render_matches_by_function(doc: rd.ResultDocument):
|
||||
for f in doc.meta.analysis.feature_counts.functions:
|
||||
if not matches_by_function.get(f.address, {}):
|
||||
continue
|
||||
ostream.writeln(f"function at {capa.render.verbose.format_address(addr)} with {f.count} features: ")
|
||||
ostream.writeln(f"function at {capa.render.verbose.format_address(f.address)} with {f.count} features: ")
|
||||
for rule_name in sorted(matches_by_function[f.address]):
|
||||
ostream.writeln(" - " + rule_name)
|
||||
|
||||
|
||||
@@ -171,8 +171,8 @@ def print_dynamic_analysis(extractor: DynamicFeatureExtractor, args):
|
||||
process_handles = tuple(extractor.get_processes())
|
||||
|
||||
if args.process:
|
||||
process_handles = tuple(filter(lambda ph: ph.inner["name"] == args.process, process_handles))
|
||||
if args.process not in [ph.inner["name"] for ph in args.process]:
|
||||
process_handles = tuple(filter(lambda ph: extractor.get_process_name(ph) == args.process, process_handles))
|
||||
if args.process not in [extractor.get_process_name(ph) for ph in process_handles]:
|
||||
print(f"{args.process} not a process")
|
||||
return -1
|
||||
|
||||
@@ -227,13 +227,13 @@ def print_static_features(functions, extractor: StaticFeatureExtractor):
|
||||
|
||||
def print_dynamic_features(processes, extractor: DynamicFeatureExtractor):
|
||||
for p in processes:
|
||||
print(f"proc: {p.inner.process_name} (ppid={p.address.ppid}, pid={p.address.pid})")
|
||||
print(f"proc: {extractor.get_process_name(p)} (ppid={p.address.ppid}, pid={p.address.pid})")
|
||||
|
||||
for feature, addr in extractor.extract_process_features(p):
|
||||
if is_global_feature(feature):
|
||||
continue
|
||||
|
||||
print(f" proc: {p.inner.process_name}: {feature}")
|
||||
print(f" proc: {extractor.get_process_name(p)}: {feature}")
|
||||
|
||||
for t in extractor.get_threads(p):
|
||||
print(f" thread: {t.address.tid}")
|
||||
|
||||
Submodule tests/data updated: 3a7690178b...ad887bbed9
@@ -199,6 +199,16 @@ def get_cape_extractor(path):
|
||||
return CapeExtractor.from_report(report)
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
def get_drakvuf_extractor(path):
|
||||
from capa.helpers import load_jsonl_from_path
|
||||
from capa.features.extractors.drakvuf.extractor import DrakvufExtractor
|
||||
|
||||
report = load_jsonl_from_path(path)
|
||||
|
||||
return DrakvufExtractor.from_report(report)
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
def get_ghidra_extractor(path: Path):
|
||||
import capa.features.extractors.ghidra.extractor
|
||||
@@ -385,6 +395,14 @@ def get_data_path_by_name(name) -> Path:
|
||||
/ "v2.2"
|
||||
/ "d46900384c78863420fb3e297d0a2f743cd2b6b3f7f82bf64059a168e07aceb7.json.gz"
|
||||
)
|
||||
elif name.startswith("93b2d1"):
|
||||
return (
|
||||
CD
|
||||
/ "data"
|
||||
/ "dynamic"
|
||||
/ "drakvuf"
|
||||
/ "93b2d1840566f45fab674ebc79a9d19c88993bcb645e0357f3cb584d16e7c795.log.gz"
|
||||
)
|
||||
elif name.startswith("ea2876"):
|
||||
return CD / "data" / "ea2876e9175410b6f6719f80ee44b9553960758c7d0f7bed73c0fe9a78d8e669.dll_"
|
||||
elif name.startswith("1038a2"):
|
||||
@@ -680,84 +698,6 @@ def parametrize(params, values, **kwargs):
|
||||
return pytest.mark.parametrize(params, values, ids=ids, **kwargs)
|
||||
|
||||
|
||||
DYNAMIC_FEATURE_PRESENCE_TESTS = sorted(
|
||||
[
|
||||
# file/string
|
||||
("0000a657", "file", capa.features.common.String("T_Ba?.BcRJa"), True),
|
||||
("0000a657", "file", capa.features.common.String("GetNamedPipeClientSessionId"), True),
|
||||
("0000a657", "file", capa.features.common.String("nope"), False),
|
||||
# file/sections
|
||||
("0000a657", "file", capa.features.file.Section(".rdata"), True),
|
||||
("0000a657", "file", capa.features.file.Section(".nope"), False),
|
||||
# file/imports
|
||||
("0000a657", "file", capa.features.file.Import("NdrSimpleTypeUnmarshall"), True),
|
||||
("0000a657", "file", capa.features.file.Import("Nope"), False),
|
||||
# file/exports
|
||||
("0000a657", "file", capa.features.file.Export("Nope"), False),
|
||||
# process/environment variables
|
||||
(
|
||||
"0000a657",
|
||||
"process=(1180:3052)",
|
||||
capa.features.common.String("C:\\Users\\comp\\AppData\\Roaming\\Microsoft\\Jxoqwnx\\jxoqwn.exe"),
|
||||
True,
|
||||
),
|
||||
("0000a657", "process=(1180:3052)", capa.features.common.String("nope"), False),
|
||||
# thread/api calls
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("NtQueryValueKey"), True),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("GetActiveWindow"), False),
|
||||
# thread/number call argument
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.Number(0x000000EC), True),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.Number(110173), False),
|
||||
# thread/string call argument
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.common.String("SetThreadUILanguage"), True),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.common.String("nope"), False),
|
||||
("0000a657", "process=(2852:3052),thread=2804,call=56", capa.features.insn.API("NtQueryValueKey"), True),
|
||||
("0000a657", "process=(2852:3052),thread=2804,call=1958", capa.features.insn.API("nope"), False),
|
||||
],
|
||||
# order tests by (file, item)
|
||||
# so that our LRU cache is most effective.
|
||||
key=lambda t: (t[0], t[1]),
|
||||
)
|
||||
|
||||
DYNAMIC_FEATURE_COUNT_TESTS = sorted(
|
||||
[
|
||||
# file/string
|
||||
("0000a657", "file", capa.features.common.String("T_Ba?.BcRJa"), 1),
|
||||
("0000a657", "file", capa.features.common.String("GetNamedPipeClientSessionId"), 1),
|
||||
("0000a657", "file", capa.features.common.String("nope"), 0),
|
||||
# file/sections
|
||||
("0000a657", "file", capa.features.file.Section(".rdata"), 1),
|
||||
("0000a657", "file", capa.features.file.Section(".nope"), 0),
|
||||
# file/imports
|
||||
("0000a657", "file", capa.features.file.Import("NdrSimpleTypeUnmarshall"), 1),
|
||||
("0000a657", "file", capa.features.file.Import("Nope"), 0),
|
||||
# file/exports
|
||||
("0000a657", "file", capa.features.file.Export("Nope"), 0),
|
||||
# process/environment variables
|
||||
(
|
||||
"0000a657",
|
||||
"process=(1180:3052)",
|
||||
capa.features.common.String("C:\\Users\\comp\\AppData\\Roaming\\Microsoft\\Jxoqwnx\\jxoqwn.exe"),
|
||||
2,
|
||||
),
|
||||
("0000a657", "process=(1180:3052)", capa.features.common.String("nope"), 0),
|
||||
# thread/api calls
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("NtQueryValueKey"), 7),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("GetActiveWindow"), 0),
|
||||
# thread/number call argument
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.Number(0x000000EC), 1),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.Number(110173), 0),
|
||||
# thread/string call argument
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.common.String("SetThreadUILanguage"), 1),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.common.String("nope"), 0),
|
||||
("0000a657", "process=(2852:3052),thread=2804,call=56", capa.features.insn.API("NtQueryValueKey"), 1),
|
||||
("0000a657", "process=(2852:3052),thread=2804,call=1958", capa.features.insn.API("nope"), 0),
|
||||
],
|
||||
# order tests by (file, item)
|
||||
# so that our LRU cache is most effective.
|
||||
key=lambda t: (t[0], t[1]),
|
||||
)
|
||||
|
||||
FEATURE_PRESENCE_TESTS = sorted(
|
||||
[
|
||||
# file/characteristic("embedded pe")
|
||||
|
||||
@@ -63,4 +63,4 @@ def test_standalone_binja_backend():
|
||||
@pytest.mark.skipif(binja_present is False, reason="Skip binja tests if the binaryninja Python API is not installed")
|
||||
def test_binja_version():
|
||||
version = binaryninja.core_version_info()
|
||||
assert version.major == 4 and version.minor == 0
|
||||
assert version.major == 4 and version.minor == 1
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
import textwrap
|
||||
|
||||
import capa.capabilities.common
|
||||
from capa.features.extractors.base_extractor import FunctionFilter
|
||||
|
||||
|
||||
def test_match_across_scopes_file_function(z9324d_extractor):
|
||||
@@ -174,6 +175,37 @@ def test_subscope_bb_rules(z9324d_extractor):
|
||||
assert "test rule" in capabilities
|
||||
|
||||
|
||||
def test_match_specific_functions(z9324d_extractor):
|
||||
rules = capa.rules.RuleSet(
|
||||
[
|
||||
capa.rules.Rule.from_yaml(
|
||||
textwrap.dedent(
|
||||
"""
|
||||
rule:
|
||||
meta:
|
||||
name: receive data
|
||||
scopes:
|
||||
static: function
|
||||
dynamic: call
|
||||
examples:
|
||||
- 9324d1a8ae37a36ae560c37448c9705a:0x401CD0
|
||||
features:
|
||||
- or:
|
||||
- api: recv
|
||||
"""
|
||||
)
|
||||
)
|
||||
]
|
||||
)
|
||||
extractor = FunctionFilter(z9324d_extractor, {0x4019C0})
|
||||
capabilities, meta = capa.capabilities.common.find_capabilities(rules, extractor)
|
||||
matches = capabilities["receive data"]
|
||||
# test that we received only one match
|
||||
assert len(matches) == 1
|
||||
# and that this match is from the specified function
|
||||
assert matches[0][0] == 0x4019C0
|
||||
|
||||
|
||||
def test_byte_matching(z9324d_extractor):
|
||||
rules = capa.rules.RuleSet(
|
||||
[
|
||||
|
||||
@@ -8,10 +8,96 @@
|
||||
|
||||
import fixtures
|
||||
|
||||
import capa.main
|
||||
import capa.features.file
|
||||
import capa.features.insn
|
||||
import capa.features.common
|
||||
import capa.features.basicblock
|
||||
|
||||
DYNAMIC_CAPE_FEATURE_PRESENCE_TESTS = sorted(
|
||||
[
|
||||
# file/string
|
||||
("0000a657", "file", capa.features.common.String("T_Ba?.BcRJa"), True),
|
||||
("0000a657", "file", capa.features.common.String("GetNamedPipeClientSessionId"), True),
|
||||
("0000a657", "file", capa.features.common.String("nope"), False),
|
||||
# file/sections
|
||||
("0000a657", "file", capa.features.file.Section(".rdata"), True),
|
||||
("0000a657", "file", capa.features.file.Section(".nope"), False),
|
||||
# file/imports
|
||||
("0000a657", "file", capa.features.file.Import("NdrSimpleTypeUnmarshall"), True),
|
||||
("0000a657", "file", capa.features.file.Import("Nope"), False),
|
||||
# file/exports
|
||||
("0000a657", "file", capa.features.file.Export("Nope"), False),
|
||||
# process/environment variables
|
||||
(
|
||||
"0000a657",
|
||||
"process=(1180:3052)",
|
||||
capa.features.common.String("C:\\Users\\comp\\AppData\\Roaming\\Microsoft\\Jxoqwnx\\jxoqwn.exe"),
|
||||
True,
|
||||
),
|
||||
("0000a657", "process=(1180:3052)", capa.features.common.String("nope"), False),
|
||||
# thread/api calls
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("NtQueryValueKey"), True),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("GetActiveWindow"), False),
|
||||
# thread/number call argument
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.Number(0x000000EC), True),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.Number(110173), False),
|
||||
# thread/string call argument
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.common.String("SetThreadUILanguage"), True),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.common.String("nope"), False),
|
||||
("0000a657", "process=(2852:3052),thread=2804,call=56", capa.features.insn.API("NtQueryValueKey"), True),
|
||||
("0000a657", "process=(2852:3052),thread=2804,call=1958", capa.features.insn.API("nope"), False),
|
||||
],
|
||||
# order tests by (file, item)
|
||||
# so that our LRU cache is most effective.
|
||||
key=lambda t: (t[0], t[1]),
|
||||
)
|
||||
|
||||
DYNAMIC_CAPE_FEATURE_COUNT_TESTS = sorted(
|
||||
# TODO(yelhamer): use the same sample for testing CAPE and DRAKVUF extractors
|
||||
# https://github.com/mandiant/capa/issues/2180
|
||||
[
|
||||
# file/string
|
||||
("0000a657", "file", capa.features.common.String("T_Ba?.BcRJa"), 1),
|
||||
("0000a657", "file", capa.features.common.String("GetNamedPipeClientSessionId"), 1),
|
||||
("0000a657", "file", capa.features.common.String("nope"), 0),
|
||||
# file/sections
|
||||
("0000a657", "file", capa.features.file.Section(".rdata"), 1),
|
||||
("0000a657", "file", capa.features.file.Section(".nope"), 0),
|
||||
# file/imports
|
||||
("0000a657", "file", capa.features.file.Import("NdrSimpleTypeUnmarshall"), 1),
|
||||
("0000a657", "file", capa.features.file.Import("Nope"), 0),
|
||||
# file/exports
|
||||
("0000a657", "file", capa.features.file.Export("Nope"), 0),
|
||||
# process/environment variables
|
||||
(
|
||||
"0000a657",
|
||||
"process=(1180:3052)",
|
||||
capa.features.common.String("C:\\Users\\comp\\AppData\\Roaming\\Microsoft\\Jxoqwnx\\jxoqwn.exe"),
|
||||
2,
|
||||
),
|
||||
("0000a657", "process=(1180:3052)", capa.features.common.String("nope"), 0),
|
||||
# thread/api calls
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("NtQueryValueKey"), 7),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("GetActiveWindow"), 0),
|
||||
# thread/number call argument
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.Number(0x000000EC), 1),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.Number(110173), 0),
|
||||
# thread/string call argument
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.common.String("SetThreadUILanguage"), 1),
|
||||
("0000a657", "process=(2852:3052),thread=2804", capa.features.common.String("nope"), 0),
|
||||
("0000a657", "process=(2852:3052),thread=2804,call=56", capa.features.insn.API("NtQueryValueKey"), 1),
|
||||
("0000a657", "process=(2852:3052),thread=2804,call=1958", capa.features.insn.API("nope"), 0),
|
||||
],
|
||||
# order tests by (file, item)
|
||||
# so that our LRU cache is most effective.
|
||||
key=lambda t: (t[0], t[1]),
|
||||
)
|
||||
|
||||
|
||||
@fixtures.parametrize(
|
||||
"sample,scope,feature,expected",
|
||||
fixtures.DYNAMIC_FEATURE_PRESENCE_TESTS,
|
||||
DYNAMIC_CAPE_FEATURE_PRESENCE_TESTS,
|
||||
indirect=["sample", "scope"],
|
||||
)
|
||||
def test_cape_features(sample, scope, feature, expected):
|
||||
@@ -20,7 +106,7 @@ def test_cape_features(sample, scope, feature, expected):
|
||||
|
||||
@fixtures.parametrize(
|
||||
"sample,scope,feature,expected",
|
||||
fixtures.DYNAMIC_FEATURE_COUNT_TESTS,
|
||||
DYNAMIC_CAPE_FEATURE_COUNT_TESTS,
|
||||
indirect=["sample", "scope"],
|
||||
)
|
||||
def test_cape_feature_counts(sample, scope, feature, expected):
|
||||
|
||||
88
tests/test_drakvuf_features.py
Normal file
88
tests/test_drakvuf_features.py
Normal file
@@ -0,0 +1,88 @@
|
||||
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
import fixtures
|
||||
|
||||
import capa.main
|
||||
import capa.features.file
|
||||
import capa.features.insn
|
||||
import capa.features.common
|
||||
|
||||
DYNAMIC_DRAKVUF_FEATURE_PRESENCE_TESTS = sorted(
|
||||
[
|
||||
("93b2d1", "file", capa.features.common.String("\\Program Files\\WindowsApps\\does_not_exist"), False),
|
||||
# file/imports
|
||||
("93b2d1", "file", capa.features.file.Import("SetUnhandledExceptionFilter"), True),
|
||||
# thread/api calls
|
||||
("93b2d1", "process=(3564:4852),thread=6592", capa.features.insn.API("LdrLoadDll"), True),
|
||||
("93b2d1", "process=(3564:4852),thread=6592", capa.features.insn.API("DoesNotExist"), False),
|
||||
# call/api
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("LdrLoadDll"), True),
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("DoesNotExist"), False),
|
||||
# call/string argument
|
||||
(
|
||||
"93b2d1",
|
||||
"process=(3564:4852),thread=6592,call=1",
|
||||
capa.features.common.String('0x667e2beb40:"api-ms-win-core-fibers-l1-1-1"'),
|
||||
True,
|
||||
),
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.common.String("non_existant"), False),
|
||||
# call/number argument
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x801), True),
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x010101010101), False),
|
||||
],
|
||||
# order tests by (file, item)
|
||||
# so that our LRU cache is most effective.
|
||||
key=lambda t: (t[0], t[1]),
|
||||
)
|
||||
|
||||
DYNAMIC_DRAKVUF_FEATURE_COUNT_TESTS = sorted(
|
||||
[
|
||||
("93b2d1", "file", capa.features.common.String("\\Program Files\\WindowsApps\\does_not_exist"), False),
|
||||
# file/imports
|
||||
("93b2d1", "file", capa.features.file.Import("SetUnhandledExceptionFilter"), 1),
|
||||
# thread/api calls
|
||||
("93b2d1", "process=(3564:4852),thread=6592", capa.features.insn.API("LdrLoadDll"), 9),
|
||||
("93b2d1", "process=(3564:4852),thread=6592", capa.features.insn.API("DoesNotExist"), False),
|
||||
# call/api
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("LdrLoadDll"), 1),
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("DoesNotExist"), 0),
|
||||
# call/string argument
|
||||
(
|
||||
"93b2d1",
|
||||
"process=(3564:4852),thread=6592,call=1",
|
||||
capa.features.common.String('0x667e2beb40:"api-ms-win-core-fibers-l1-1-1"'),
|
||||
1,
|
||||
),
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.common.String("non_existant"), 0),
|
||||
# call/number argument
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x801), 1),
|
||||
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x010101010101), 0),
|
||||
],
|
||||
# order tests by (file, item)
|
||||
# so that our LRU cache is most effective.
|
||||
key=lambda t: (t[0], t[1]),
|
||||
)
|
||||
|
||||
|
||||
@fixtures.parametrize(
|
||||
"sample,scope,feature,expected",
|
||||
DYNAMIC_DRAKVUF_FEATURE_PRESENCE_TESTS,
|
||||
indirect=["sample", "scope"],
|
||||
)
|
||||
def test_drakvuf_features(sample, scope, feature, expected):
|
||||
fixtures.do_test_feature_presence(fixtures.get_drakvuf_extractor, sample, scope, feature, expected)
|
||||
|
||||
|
||||
@fixtures.parametrize(
|
||||
"sample,scope,feature,expected",
|
||||
DYNAMIC_DRAKVUF_FEATURE_COUNT_TESTS,
|
||||
indirect=["sample", "scope"],
|
||||
)
|
||||
def test_drakvuf_feature_counts(sample, scope, feature, expected):
|
||||
fixtures.do_test_feature_count(fixtures.get_drakvuf_extractor, sample, scope, feature, expected)
|
||||
48
tests/test_drakvuf_models.py
Normal file
48
tests/test_drakvuf_models.py
Normal file
@@ -0,0 +1,48 @@
|
||||
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at: [package root]/LICENSE.txt
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and limitations under the License.
|
||||
import json
|
||||
|
||||
from capa.features.extractors.drakvuf.models import SystemCall
|
||||
|
||||
|
||||
def test_syscall_argument_construction():
|
||||
call_dictionary = json.loads(
|
||||
r"""
|
||||
{
|
||||
"Plugin": "syscall",
|
||||
"TimeStamp": "1716999134.581449",
|
||||
"PID": 3888,
|
||||
"PPID": 2852,
|
||||
"TID": 368,
|
||||
"UserName": "SessionID",
|
||||
"UserId": 2,
|
||||
"ProcessName": "\\Device\\HarddiskVolume2\\Windows\\explorer.exe",
|
||||
"Method": "NtRemoveIoCompletionEx",
|
||||
"EventUID": "0x1f",
|
||||
"Module": "nt",
|
||||
"vCPU": 0,
|
||||
"CR3": "0x119b1002",
|
||||
"Syscall": 369,
|
||||
"NArgs": 6,
|
||||
"IoCompletionHandle": "0xffffffff80001ac0",
|
||||
"IoCompletionInformation": "0xfffff506a0284898",
|
||||
"Count": "0x1",
|
||||
"NumEntriesRemoved": "0xfffff506a02846bc",
|
||||
"Timeout": "0xfffff506a02846d8",
|
||||
"Alertable": "0x0"
|
||||
}
|
||||
"""
|
||||
)
|
||||
call = SystemCall(**call_dictionary)
|
||||
assert len(call.arguments) == call.nargs
|
||||
assert call.arguments["IoCompletionHandle"] == "0xffffffff80001ac0"
|
||||
assert call.arguments["IoCompletionInformation"] == "0xfffff506a0284898"
|
||||
assert call.arguments["Count"] == "0x1"
|
||||
assert call.arguments["NumEntriesRemoved"] == "0xfffff506a02846bc"
|
||||
assert call.arguments["Timeout"] == "0xfffff506a02846d8"
|
||||
assert call.arguments["Alertable"] == "0x0"
|
||||
@@ -23,10 +23,21 @@ def get_script_path(s: str):
|
||||
return str(CD / ".." / "scripts" / s)
|
||||
|
||||
|
||||
def get_file_path():
|
||||
def get_binary_file_path():
|
||||
return str(CD / "data" / "9324d1a8ae37a36ae560c37448c9705a.exe_")
|
||||
|
||||
|
||||
def get_report_file_path():
|
||||
return str(
|
||||
CD
|
||||
/ "data"
|
||||
/ "dynamic"
|
||||
/ "cape"
|
||||
/ "v2.4"
|
||||
/ "fb7ade52dc5a1d6128b9c217114a46d0089147610f99f5122face29e429a1e74.json.gz"
|
||||
)
|
||||
|
||||
|
||||
def get_rules_path():
|
||||
return str(CD / ".." / "rules")
|
||||
|
||||
@@ -48,12 +59,13 @@ def get_rule_path():
|
||||
pytest.param("lint.py", ["-t", "create directory", get_rules_path()]),
|
||||
# `create directory` rule has native and .NET example PEs
|
||||
pytest.param("lint.py", ["--thorough", "-t", "create directory", get_rules_path()]),
|
||||
pytest.param("match-function-id.py", [get_file_path()]),
|
||||
pytest.param("show-capabilities-by-function.py", [get_file_path()]),
|
||||
pytest.param("show-features.py", [get_file_path()]),
|
||||
pytest.param("show-features.py", ["-F", "0x407970", get_file_path()]),
|
||||
pytest.param("show-unused-features.py", [get_file_path()]),
|
||||
pytest.param("capa_as_library.py", [get_file_path()]),
|
||||
pytest.param("match-function-id.py", [get_binary_file_path()]),
|
||||
pytest.param("show-capabilities-by-function.py", [get_binary_file_path()]),
|
||||
pytest.param("show-features.py", [get_binary_file_path()]),
|
||||
pytest.param("show-features.py", ["-F", "0x407970", get_binary_file_path()]),
|
||||
pytest.param("show-features.py", ["-P", "MicrosoftEdgeUpdate.exe", get_report_file_path()]),
|
||||
pytest.param("show-unused-features.py", [get_binary_file_path()]),
|
||||
pytest.param("capa_as_library.py", [get_binary_file_path()]),
|
||||
],
|
||||
)
|
||||
def test_scripts(script, args):
|
||||
|
||||
13
web/explorer/.eslintrc.cjs
Normal file
13
web/explorer/.eslintrc.cjs
Normal file
@@ -0,0 +1,13 @@
|
||||
/* eslint-env node */
|
||||
require("@rushstack/eslint-patch/modern-module-resolution");
|
||||
|
||||
module.exports = {
|
||||
root: true,
|
||||
extends: ["plugin:vue/vue3-essential", "eslint:recommended", "@vue/eslint-config-prettier/skip-formatting"],
|
||||
parserOptions: {
|
||||
ecmaVersion: "latest"
|
||||
},
|
||||
rules: {
|
||||
"vue/multi-word-component-names": "off"
|
||||
}
|
||||
};
|
||||
30
web/explorer/.gitignore
vendored
Normal file
30
web/explorer/.gitignore
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
|
||||
# Dependencies, build results, and other generated files
|
||||
node_modules
|
||||
.DS_Store
|
||||
dist
|
||||
dist-ssr
|
||||
coverage
|
||||
*.local
|
||||
|
||||
# Editor directories and files
|
||||
.vscode/*
|
||||
!.vscode/extensions.json
|
||||
.vscode
|
||||
.idea
|
||||
*.suo
|
||||
*.ntvs*
|
||||
*.njsproj
|
||||
*.sln
|
||||
*.sw?
|
||||
|
||||
# TypeScript incremental build info
|
||||
*.tsbuildinfo
|
||||
8
web/explorer/.prettierrc.json
Normal file
8
web/explorer/.prettierrc.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"$schema": "https://json.schemastore.org/prettierrc",
|
||||
"semi": true,
|
||||
"tabWidth": 4,
|
||||
"singleQuote": false,
|
||||
"printWidth": 120,
|
||||
"trailingComma": "none"
|
||||
}
|
||||
123
web/explorer/DEVELOPMENT.md
Normal file
123
web/explorer/DEVELOPMENT.md
Normal file
@@ -0,0 +1,123 @@
|
||||
# Development Guide for Capa Explorer Web
|
||||
|
||||
This guide will help you set up the Capa Explorer Web project for local development.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, ensure you have the following installed:
|
||||
|
||||
- Node.js (v20.x or later recommended)
|
||||
- npm (v10.x or later)
|
||||
- Git
|
||||
|
||||
## Setting Up the Development Environment
|
||||
|
||||
1. Clone the repository:
|
||||
|
||||
```
|
||||
git clone https://github.com/mandiat/capa.git
|
||||
cd capa/web/explorer
|
||||
```
|
||||
|
||||
2. Install dependencies:
|
||||
|
||||
```
|
||||
npm install
|
||||
```
|
||||
|
||||
3. Start the development server:
|
||||
|
||||
```
|
||||
npm run dev
|
||||
```
|
||||
|
||||
This will start the Vite development server. The application should now be running at `http://localhost:<port>`
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
web/exporer/
|
||||
├── src/
|
||||
│ ├── assets/
|
||||
│ ├── components/
|
||||
│ ├── composables/
|
||||
│ ├── router/
|
||||
│ ├── utils/
|
||||
│ ├── views/
|
||||
│ ├── App.vue
|
||||
│ └── main.js
|
||||
├── public/
|
||||
├── tests/
|
||||
├── index.html
|
||||
├── package.json
|
||||
├── vite.config.js
|
||||
├── DEVELOPMENT.md
|
||||
└── README.md
|
||||
```
|
||||
|
||||
- `src/`: Contains the source code of the application
|
||||
- `src/components/`: Reusable Vue components
|
||||
- `src/composables/`: Vue composition functions
|
||||
- `src/router/`: Vue Router configuration
|
||||
- `src/utils/`: Utility functions
|
||||
- `src/views/`: Top-level views/pages
|
||||
- `src/tests/`: Test files
|
||||
- `public/`: Static assets that will be served as-is
|
||||
|
||||
## Building for Production
|
||||
|
||||
To build the application for production:
|
||||
|
||||
```
|
||||
npm run build
|
||||
```
|
||||
|
||||
This will generate production-ready files in the `dist/` directory.
|
||||
|
||||
Or, you can build a standalone bundle application that can be used offline:
|
||||
|
||||
```
|
||||
npm run build:bundle
|
||||
```
|
||||
|
||||
This will generate an offline HTML bundle file in the `dist/` directory.
|
||||
|
||||
## Testing
|
||||
|
||||
Run the test suite with:
|
||||
|
||||
```
|
||||
npm run test
|
||||
```
|
||||
|
||||
We use Vitest as our testing framework. Please ensure all tests pass before submitting a pull request.
|
||||
|
||||
## Linting and Formatting
|
||||
|
||||
We use ESLint for linting and Prettier for code formatting. Run the linter with:
|
||||
|
||||
```
|
||||
npm run lint
|
||||
npm run format
|
||||
```
|
||||
|
||||
## Working with PrimeVue Components
|
||||
|
||||
Capa Explorer Web uses the PrimeVue UI component library. When adding new features or modifying existing ones, refer to the [PrimeVue documentation](https://primevue.org/vite) for available components and their usage.
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Follow the [Vue.js Style Guide](https://vuejs.org/style-guide/) for consistent code style.
|
||||
2. Document new functions, components, and complex logic.
|
||||
3. Write tests for new features and bug fixes.
|
||||
4. Keep components small and focused on a single responsibility.
|
||||
5. Use composables for reusable logic across components.
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [Vue.js Documentation](https://vuejs.org/guide/introduction.html)
|
||||
- [Vite Documentation](https://vitejs.dev/guide/)
|
||||
- [Vitest Documentation](https://vitest.dev/guide/)
|
||||
- [PrimeVue Documentation](https://www.primevue.org/)
|
||||
|
||||
If you encounter any issues or have questions about the development process, please open an issue on the GitHub repository.
|
||||
41
web/explorer/README.md
Normal file
41
web/explorer/README.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# Capa Explorer Web
|
||||
|
||||
Capa Explorer Web is a browser-based user interface for exploring program capabilities identified by capa. It provides an intuitive and interactive way to analyze and visualize the results of capa analysis.
|
||||
|
||||
## Features
|
||||
|
||||
- **Import capa Results**: Easily upload or import capa JSON result files.
|
||||
- **Interactive Tree View**: Explore and filter rule matches in a hierarchical structure.
|
||||
- **Function Capabilities**: Group and filter capabilities by function for static analysis.
|
||||
- **Process Capabilities**: Group capabilities by process for dynamic analysis.
|
||||
|
||||
## Getting Started
|
||||
|
||||
1. **Access the Application**: Open Capa Explorer Web in your web browser.
|
||||
You can start using Capa Explorer Web by accessing [https://mandiant.github.io/capa](https://mandiant.github.io/capa/) or running it locally by dowloading the offline release in the [releases](https://github.com/mandiant/capa/releases) section and loading it in your browser.
|
||||
|
||||
2. **Import capa Results**:
|
||||
|
||||
- Click on "Upload from local" to select a capa analysis document file from your computer (with a version higher than 7.0.0).
|
||||
- Or, paste a URL to a capa JSON file and click the arrow button to load it.
|
||||
- Alternatively, use the "Preview Static" or "Preview Dynamic" for sample data.
|
||||
|
||||
3. **Explore the Results**:
|
||||
|
||||
- Use the tree view to navigate through the identified capabilities.
|
||||
- Toggle between different views using the checkboxes in the settings panel:
|
||||
- "Show capabilities by function/process" for grouped analysis.
|
||||
- "Show library rule matches" to include or exclude library rules.
|
||||
|
||||
4. **Interact with the Data**:
|
||||
- Expand/collapse nodes in the table to see more details.
|
||||
- Use the search and filter options to find specific features, functions or capabilities (rules).
|
||||
- Right click on rule names to view their source code or additional information.
|
||||
|
||||
## Feedback and Contributions
|
||||
|
||||
We welcome your feedback and contributions to improve the web-based Capa Explorer. Please report any issues or suggest enhancements through the `capa` GitHub repository.
|
||||
|
||||
---
|
||||
|
||||
For developers interested in building or contributing to Capa Explorer WebUI, please refer to our [Development Guide](DEVELOPMENT.md).
|
||||
13
web/explorer/index.html
Normal file
13
web/explorer/index.html
Normal file
@@ -0,0 +1,13 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="icon" href="/favicon.ico" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>Capa Explorer</title>
|
||||
</head>
|
||||
<body>
|
||||
<div id="app"></div>
|
||||
<script type="module" src="/src/main.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
8
web/explorer/jsconfig.json
Normal file
8
web/explorer/jsconfig.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"paths": {
|
||||
"@/*": ["./src/*"]
|
||||
}
|
||||
},
|
||||
"exclude": ["node_modules", "dist"]
|
||||
}
|
||||
4133
web/explorer/package-lock.json
generated
Normal file
4133
web/explorer/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
40
web/explorer/package.json
Normal file
40
web/explorer/package.json
Normal file
@@ -0,0 +1,40 @@
|
||||
{
|
||||
"name": "capa-webui",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"build": "vite build",
|
||||
"build:bundle": "vite build --mode bundle --outDir=capa-explorer-web",
|
||||
"preview": "vite preview",
|
||||
"test": "vitest",
|
||||
"lint": "eslint . --ext .vue,.js,.jsx,.cjs,.mjs --fix --ignore-path .gitignore",
|
||||
"format": "prettier --write src/",
|
||||
"format:check": "prettier --check src/"
|
||||
},
|
||||
"dependencies": {
|
||||
"@highlightjs/vue-plugin": "^2.1.0",
|
||||
"@primevue/themes": "^4.0.0-rc.2",
|
||||
"pako": "^2.1.0",
|
||||
"plotly.js-dist": "^2.34.0",
|
||||
"primeflex": "^3.3.1",
|
||||
"primeicons": "^7.0.0",
|
||||
"primevue": "^4.0.0-rc.2",
|
||||
"vue": "^3.4.29",
|
||||
"vue-router": "^4.3.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@rushstack/eslint-patch": "^1.8.0",
|
||||
"@vitejs/plugin-vue": "^5.0.5",
|
||||
"@vue/eslint-config-prettier": "^9.0.0",
|
||||
"@vue/test-utils": "^2.4.6",
|
||||
"eslint": "^8.57.0",
|
||||
"eslint-plugin-vue": "^9.23.0",
|
||||
"jsdom": "^24.1.0",
|
||||
"prettier": "^3.2.5",
|
||||
"vite": "^5.3.1",
|
||||
"vite-plugin-singlefile": "^2.0.2",
|
||||
"vitest": "^1.6.0"
|
||||
}
|
||||
}
|
||||
BIN
web/explorer/public/favicon.ico
Normal file
BIN
web/explorer/public/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 15 KiB |
16
web/explorer/src/App.vue
Normal file
16
web/explorer/src/App.vue
Normal file
@@ -0,0 +1,16 @@
|
||||
<template>
|
||||
<Toast position="bottom-center" group="bc" />
|
||||
<header>
|
||||
<div class="wrapper">
|
||||
<BannerHeader />
|
||||
<NavBar />
|
||||
</div>
|
||||
</header>
|
||||
<RouterView />
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { RouterView } from "vue-router";
|
||||
import NavBar from "./components/NavBar.vue";
|
||||
import BannerHeader from "./components/BannerHeader.vue";
|
||||
</script>
|
||||
BIN
web/explorer/src/assets/images/icon.png
Normal file
BIN
web/explorer/src/assets/images/icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 6.4 KiB |
BIN
web/explorer/src/assets/images/logo-full.png
Normal file
BIN
web/explorer/src/assets/images/logo-full.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 9.8 KiB |
28
web/explorer/src/assets/main.css
Normal file
28
web/explorer/src/assets/main.css
Normal file
@@ -0,0 +1,28 @@
|
||||
body {
|
||||
margin: 0 auto;
|
||||
font-weight: normal;
|
||||
font-family: Arial, Helvetica, sans-serif;
|
||||
}
|
||||
|
||||
a {
|
||||
text-decoration: none;
|
||||
color: inherit;
|
||||
transition: color 0.15s ease-in-out;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
color: var(--primary-color);
|
||||
}
|
||||
|
||||
.font-monospace {
|
||||
font-family: monospace;
|
||||
}
|
||||
|
||||
.cursor-default {
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
/* remove the border from rows other than rule names */
|
||||
.p-treetable-tbody > tr:not(:is([aria-level="1"])) > td {
|
||||
border: none !important;
|
||||
}
|
||||
45
web/explorer/src/components/BannerHeader.vue
Normal file
45
web/explorer/src/components/BannerHeader.vue
Normal file
@@ -0,0 +1,45 @@
|
||||
<template>
|
||||
<div
|
||||
v-if="showBanner"
|
||||
class="bg-bluegray-900 text-gray-100 flex justify-content-between lg:justify-content-center align-items-center flex-wrap"
|
||||
>
|
||||
<div class="font-bold mr-8">This is an early release</div>
|
||||
<div class="align-items-center hidden lg:flex">
|
||||
<span class="line-height-3">Please report any bugs, enhancements or features in the </span>
|
||||
<a
|
||||
v-ripple
|
||||
href="https://github.com/mandiant/capa/issues"
|
||||
class="flex align-items-center ml-2 mr-8 text-white"
|
||||
>
|
||||
<span class="no-underline font-bold">Github issues</span>
|
||||
<i class="pi pi-github ml-2"></i>
|
||||
</a>
|
||||
</div>
|
||||
<a
|
||||
v-ripple
|
||||
@click="closeBanner"
|
||||
class="flex align-items-center no-underline justify-content-center border-circle text-gray-50 hover:bg-bluegray-700 cursor-pointer transition-colors transition-duration-150"
|
||||
style="width: 2rem; height: 2rem"
|
||||
>
|
||||
<i class="pi pi-times"></i>
|
||||
</a>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, onMounted } from "vue";
|
||||
|
||||
const showBanner = ref(true);
|
||||
|
||||
onMounted(() => {
|
||||
const bannerHidden = localStorage.getItem("bannerHidden");
|
||||
if (bannerHidden === "true") {
|
||||
showBanner.value = false;
|
||||
}
|
||||
});
|
||||
|
||||
const closeBanner = () => {
|
||||
showBanner.value = false;
|
||||
localStorage.setItem("bannerHidden", "true");
|
||||
};
|
||||
</script>
|
||||
16
web/explorer/src/components/DescriptionPanel.vue
Normal file
16
web/explorer/src/components/DescriptionPanel.vue
Normal file
@@ -0,0 +1,16 @@
|
||||
<template>
|
||||
<div class="flex flex-column align-items-center">
|
||||
<div class="text-center">
|
||||
<h1>
|
||||
<img src="@/assets/images/logo-full.png" alt="Capa: identify program capabilities" />
|
||||
<h6 class="font-medium" style="color: rgb(176, 26, 26)">capa: identify program capabilities</h6>
|
||||
</h1>
|
||||
</div>
|
||||
<div>
|
||||
<p class="text-xl max-w-75rem" style="max-width: 75ch">
|
||||
Capa-WebUI is a web-based tool for exploring the capabilities identified in a program. It can be used to
|
||||
search and display the rule matches in different viewing modes.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
130
web/explorer/src/components/FunctionCapabilities.vue
Normal file
130
web/explorer/src/components/FunctionCapabilities.vue
Normal file
@@ -0,0 +1,130 @@
|
||||
<template>
|
||||
<DataTable
|
||||
:value="tableData"
|
||||
rowGroupMode="rowspan"
|
||||
groupRowsBy="address"
|
||||
removableSort
|
||||
size="small"
|
||||
:filters="filters"
|
||||
:filterMode="filterMode"
|
||||
filterDisplay="row"
|
||||
:globalFilterFields="['address', 'rule', 'namespace']"
|
||||
>
|
||||
<template #header>
|
||||
<IconField>
|
||||
<InputIcon class="pi pi-search" />
|
||||
<InputText v-model="filters['global'].value" placeholder="Global search" />
|
||||
</IconField>
|
||||
</template>
|
||||
|
||||
<Column
|
||||
field="address"
|
||||
sortable
|
||||
header="Function Address"
|
||||
class="w-min"
|
||||
:showFilterMenu="false"
|
||||
:showClearButton="false"
|
||||
>
|
||||
<template #filter v-if="props.showColumnFilters">
|
||||
<InputText v-model="filters['address'].value" placeholder="Filter by function address" />
|
||||
</template>
|
||||
<template #body="{ data }">
|
||||
<span class="font-monospace text-base">{{ data.address }}</span>
|
||||
<span v-if="data.matchCount > 1" class="font-italic">
|
||||
({{ data.matchCount }} match{{ data.matchCount > 1 ? "es" : "" }})
|
||||
</span>
|
||||
</template>
|
||||
</Column>
|
||||
|
||||
<Column field="rule" header="Rule Matches" class="w-min" :showFilterMenu="false" :showClearButton="false">
|
||||
<template #filter v-if="props.showColumnFilters">
|
||||
<InputText v-model="filters['rule'].value" placeholder="Filter by rule" />
|
||||
</template>
|
||||
<template #body="{ data }">
|
||||
{{ data.rule }}
|
||||
<LibraryTag v-if="data.lib" />
|
||||
</template>
|
||||
</Column>
|
||||
|
||||
<Column field="namespace" header="Namespace" :showFilterMenu="false" :showClearButton="false">
|
||||
<template #filter v-if="props.showColumnFilters">
|
||||
<InputText v-model="filters['namespace'].value" placeholder="Filter by namespace" />
|
||||
</template>
|
||||
</Column>
|
||||
</DataTable>
|
||||
</template>
|
||||
|
||||
<script setup>
import { ref, computed } from "vue";
import DataTable from "primevue/datatable";
import Column from "primevue/column";
import IconField from "primevue/iconfield";
import InputIcon from "primevue/inputicon";
import InputText from "primevue/inputtext";
import LibraryTag from "@/components/misc/LibraryTag.vue";

import { parseFunctionCapabilities } from "@/utils/rdocParser";

const props = defineProps({
    // full capa rdoc result document
    data: {
        type: Object,
        required: true
    },
    // when false, rows for library rules are hidden
    showLibraryRules: {
        type: Boolean,
        default: false
    },
    // when true, per-column filter inputs are rendered in the table header
    showColumnFilters: {
        type: Boolean,
        default: false
    }
});

// DataTable filter state; keys match column field names
const filters = ref({
    global: { value: null, matchMode: "contains" },
    address: { value: null, matchMode: "contains" },
    rule: { value: null, matchMode: "contains" },
    namespace: { value: null, matchMode: "contains" }
});
const filterMode = ref("lenient");

// Computed (rather than parsed once in onMounted) so the table stays in
// sync if a new result document is loaded into props.data after mount.
const functionCapabilities = computed(() => parseFunctionCapabilities(props.data));

/*
 * tableData: rows for the DataTable, one row per (function, capability) pair.
 * Re-evaluates whenever props.showLibraryRules or the parsed capabilities change.
 */
const tableData = computed(() => {
    const rows = [];
    for (const fcaps of functionCapabilities.value) {
        const capabilities = fcaps.capabilities;
        for (const capability of capabilities) {
            if (capability.lib && !props.showLibraryRules) continue;
            rows.push({
                address: fcaps.address,
                // NOTE(review): counts every capability in the function,
                // including library rules hidden by the filter above —
                // confirm this is the intended display value
                matchCount: capabilities.length,
                rule: capability.name,
                namespace: capability.namespace,
                lib: capability.lib
            });
        }
    }
    return rows;
});
</script>
|
||||
|
||||
<style scoped>
|
||||
/* tighten up the spacing between rows, and change border color */
|
||||
:deep(.p-datatable-tbody > tr > td) {
|
||||
padding: 0.2rem 0.5rem !important;
|
||||
border-width: 0 0 1px 0;
|
||||
border-color: #97a0ab;
|
||||
}
|
||||
</style>
|
||||
109
web/explorer/src/components/MetadataPanel.vue
Normal file
109
web/explorer/src/components/MetadataPanel.vue
Normal file
@@ -0,0 +1,109 @@
|
||||
<template>
|
||||
<!-- Main container with gradient background -->
|
||||
<div
|
||||
class="flex flex-wrap align-items-center justify-content-between w-full p-3 shadow-1"
|
||||
:style="{ background: 'linear-gradient(to right, #2c3e50, #3498db)' }"
|
||||
>
|
||||
<!-- File information section -->
|
||||
<div class="flex-grow-1 mr-3">
|
||||
<h1 class="text-xl m-0 text-white">
|
||||
{{ fileName }}
|
||||
</h1>
|
||||
<p class="text-xs mt-1 mb-0 text-white-alpha-70">
|
||||
SHA256:
|
||||
<a :href="`https://www.virustotal.com/gui/file/${sha256}`" target="_blank">{{ sha256 }} </a>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<!-- Vertical divider -->
|
||||
<div class="mx-3 bg-white-alpha-30 hidden sm:block" style="width: 1px; height: 30px"></div>
|
||||
|
||||
<!-- Analysis information section -->
|
||||
<div class="flex-grow-1 mr-3">
|
||||
<!-- OS • Program Format • Arch -->
|
||||
<div class="flex align-items-center text-sm m-0 line-height-3 text-white">
|
||||
<span class="capitalize">{{ data.meta.analysis.os }}</span>
|
||||
<span class="ml-2 mr-2 text-white-alpha-30"> • </span>
|
||||
<span class="uppercase">{{ data.meta.analysis.format }}</span>
|
||||
<span class="ml-2 mr-2 text-white-alpha-30"> • </span>
|
||||
<span>{{ data.meta.analysis.arch === "i386" ? "i386" : data.meta.analysis.arch.toUpperCase() }}</span>
|
||||
</div>
|
||||
<!-- Flavor • Extractor • CAPA Version • Timestamp -->
|
||||
<div class="flex-wrap align-items-center text-sm m-0 line-height-3 text-white">
|
||||
<span class="capitalize">
|
||||
{{ flavor }} analysis with {{ data.meta.analysis.extractor.split(/(Feature)?Extractor/)[0] }}</span
|
||||
>
|
||||
<!--- Extractor (e.g., CapeExtractor -> Cape, GhidraFeatureExtractor -> Ghidra, ... etc) -->
|
||||
<span class="mx-2 text-white-alpha-30"> • </span>
|
||||
<span>CAPA v{{ data.meta.version }}</span>
|
||||
<span class="mx-2 text-white-alpha-30"> • </span>
|
||||
<span>{{ new Date(data.meta.timestamp).toLocaleString() }}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Vertical divider -->
|
||||
<div class="mx-3 bg-white-alpha-30 hidden sm:block" style="width: 1px; height: 30px"></div>
|
||||
|
||||
<!-- Key metrics section -->
|
||||
<div class="flex justify-content-around flex-grow-1">
|
||||
<!-- Rules count -->
|
||||
<div class="text-center">
|
||||
<span class="block text-xl font-bold text-white">{{ keyMetrics.ruleCount }}</span>
|
||||
<span class="block text-xs uppercase text-white-alpha-70">Rules</span>
|
||||
</div>
|
||||
<!-- Namespaces count -->
|
||||
<div class="text-center">
|
||||
<span class="block text-xl font-bold text-white">{{ keyMetrics.namespaceCount }}</span>
|
||||
<span class="block text-xs uppercase text-white-alpha-70">Namespaces</span>
|
||||
</div>
|
||||
<!-- Functions or Processes count -->
|
||||
<div class="text-center">
|
||||
<span class="block text-xl font-bold text-white">{{ keyMetrics.functionOrProcessCount }}</span>
|
||||
<span class="block text-xs uppercase text-white-alpha-70">
|
||||
{{ flavor === "static" ? "Functions" : "Processes" }}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
<script setup>
import { computed } from "vue";

const props = defineProps({
    // full capa rdoc result document
    data: {
        type: Object,
        required: true
    }
});

// Base filename from the sample path; split on either separator so
// Windows-style paths ("C:\dir\malware.exe") also work.
const fileName = props.data.meta.sample.path.split(/[\\/]/).pop();
// analysis flavor: "static" or "dynamic"
const flavor = props.data.meta.flavor;
// sample SHA256, used for the VirusTotal link in the template
const sha256 = props.data.meta.sample.sha256;

// Key metrics shown in the right-hand section of the panel.
// Computed (instead of a ref filled in onMounted) so the values stay in
// sync if props.data is replaced, and `const` since the binding itself
// is never reassigned.
const keyMetrics = computed(() => {
    if (!props.data) {
        return { ruleCount: 0, namespaceCount: 0, functionOrProcessCount: 0 };
    }
    return {
        ruleCount: Object.keys(props.data.rules).length,
        namespaceCount: new Set(Object.values(props.data.rules).map((rule) => rule.meta.namespace)).size,
        // static analyses report functions, dynamic ones report processes
        functionOrProcessCount:
            props.data.meta.analysis.feature_counts.functions?.length ||
            props.data.meta.analysis.feature_counts.processes?.length
    };
});
</script>
|
||||
117
web/explorer/src/components/NamespaceChart.vue
Normal file
117
web/explorer/src/components/NamespaceChart.vue
Normal file
@@ -0,0 +1,117 @@
|
||||
<template>
|
||||
<div ref="chartRef" class="w-screen h-screen"></div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, onMounted } from "vue";
|
||||
import Plotly from "plotly.js-dist";
|
||||
|
||||
const props = defineProps({
|
||||
data: {
|
||||
type: Object,
|
||||
required: true
|
||||
}
|
||||
});
|
||||
|
||||
const chartRef = ref(null);
|
||||
|
||||
/**
 * Build the parallel arrays (ids/labels/parents/values) that Plotly's
 * sunburst trace expects, from the rdoc `rules` object.
 *
 * Library rules are skipped; each remaining rule contributes its match
 * count to every segment of its namespace path, then appears as a leaf.
 *
 * @param {Object} rules - mapping of rule name -> rule record
 * @returns {{ids: string[], labels: string[], parents: string[], values: number[]}}
 */
const createSunburstData = (rules) => {
    const data = {
        ids: [],
        labels: [],
        parents: [],
        values: []
    };

    // id -> index into the parallel arrays; replaces the original
    // Array.includes/indexOf scans, which made addNamespace O(n^2).
    const indexById = new Map();

    // Register every prefix of the namespace path, accumulating `value`
    // into each segment; returns the deepest segment id (the leaf parent).
    const addNamespace = (namespace, value) => {
        const parts = namespace.split("/");
        let currentId = "";
        let parent = "";

        parts.forEach((part) => {
            currentId = currentId ? `${currentId}/${part}` : part;

            if (!indexById.has(currentId)) {
                indexById.set(currentId, data.ids.length);
                data.ids.push(currentId);
                data.labels.push(part);
                data.parents.push(parent);
                data.values.push(0);
            }

            data.values[indexById.get(currentId)] += value;

            parent = currentId;
        });

        return parent;
    };

    Object.entries(rules).forEach(([ruleName, rule]) => {
        if (rule.meta.lib) return; // Skip library rules

        const namespace = rule.meta.namespace || "root";
        const parent = addNamespace(namespace, rule.matches.length);

        // Add the rule itself as a leaf node; register its id so a later
        // namespace segment with the same id is not duplicated (matches
        // the original Array.includes behavior).
        indexById.set(ruleName, data.ids.length);
        data.ids.push(ruleName);
        data.labels.push(rule.meta.name);
        data.parents.push(parent);
        data.values.push(rule.matches.length);
    });

    return data;
};
|
||||
|
||||
/**
 * Render the namespace sunburst chart into the chartRef container.
 * Does nothing until the container element exists in the DOM.
 *
 * @returns {Object|undefined} the sunburst data arrays, or undefined if
 *          the container is not mounted yet
 */
const renderChart = () => {
    if (!chartRef.value) return;

    const sunburstData = createSunburstData(props.data.rules);

    const trace = {
        type: "sunburst",
        ids: sunburstData.ids,
        labels: sunburstData.labels,
        parents: sunburstData.parents,
        values: sunburstData.values,
        outsidetextfont: { size: 20, color: "#377eb8" },
        leaf: { opacity: 0.4 },
        marker: { line: { width: 2 } },
        branchvalues: "total"
    };

    const layout = {
        margin: { l: 0, r: 0, b: 0, t: 0 },
        sunburstcolorway: [
            "#636efa",
            "#EF553B",
            "#00cc96",
            "#ab63fa",
            "#19d3f3",
            "#e763fa",
            "#FECB52",
            "#FFA15A",
            "#FF6692",
            "#B6E880"
        ],
        extendsunburstcolorway: true
    };

    Plotly.newPlot(chartRef.value, [trace], layout, { responsive: true });

    return sunburstData;
};

// draw once the container element is available
onMounted(() => {
    renderChart();
});
|
||||
</script>
|
||||
30
web/explorer/src/components/NavBar.vue
Normal file
30
web/explorer/src/components/NavBar.vue
Normal file
@@ -0,0 +1,30 @@
|
||||
<script setup>
|
||||
import Menubar from "primevue/menubar";
|
||||
import { RouterLink } from "vue-router";
|
||||
</script>
|
||||
|
||||
<template>
|
||||
<Menubar class="p-1">
|
||||
<template #start>
|
||||
<RouterLink to="/">
|
||||
<img src="@/assets/images/icon.png" alt="Logo" class="w-2rem" />
|
||||
</RouterLink>
|
||||
</template>
|
||||
<template #end>
|
||||
<div class="flex align-items-center gap-3">
|
||||
<a
|
||||
v-ripple
|
||||
v-tooltip.right="'Download capa Explorer Web for offline usage'"
|
||||
href="./capa-explorer-web.zip"
|
||||
download="capa-explorer-web.zip"
|
||||
aria-label="Download capa Explorer Web release"
|
||||
>
|
||||
<i class="pi pi-download text-xl"></i>
|
||||
</a>
|
||||
<a v-ripple href="https://github.com/mandiant/capa" class="flex justify-content-center w-2rem">
|
||||
<i class="pi pi-github text-2xl"></i>
|
||||
</a>
|
||||
</div>
|
||||
</template>
|
||||
</Menubar>
|
||||
</template>
|
||||
221
web/explorer/src/components/ProcessCapabilities.vue
Normal file
221
web/explorer/src/components/ProcessCapabilities.vue
Normal file
@@ -0,0 +1,221 @@
|
||||
<template>
|
||||
<div class="card">
|
||||
<TreeTable
|
||||
:value="processTree"
|
||||
v-model:expandedKeys="expandedKeys"
|
||||
:filters="filters"
|
||||
filterMode="lenient"
|
||||
sortField="pid"
|
||||
:sortOrder="1"
|
||||
:rowHover="true"
|
||||
>
|
||||
<Column field="processname" header="Process" expander>
|
||||
<template #body="slotProps">
|
||||
<span
|
||||
:id="'process-' + slotProps.node.key"
|
||||
class="cursor-pointer flex align-items-center"
|
||||
@mouseenter="showTooltip($event, slotProps.node)"
|
||||
@mouseleave="hideTooltip"
|
||||
>
|
||||
<span
|
||||
class="text-lg text-overflow-ellipsis overflow-hidden white-space-nowrap inline-block max-w-20rem font-monospace"
|
||||
>
|
||||
{{ slotProps.node.data.processname }}
|
||||
</span>
|
||||
<span class="ml-2"> - PID: {{ slotProps.node.data.pid }} </span>
|
||||
<span v-if="slotProps.node.data.uniqueMatchCount > 0" class="font-italic ml-2">
|
||||
({{ slotProps.node.data.uniqueMatchCount }} unique
|
||||
{{ slotProps.node.data.uniqueMatchCount > 1 ? "matches" : "match" }})
|
||||
</span>
|
||||
</span>
|
||||
</template>
|
||||
</Column>
|
||||
<Column field="pid" header="PID" sortable>
|
||||
<template #body="slotProps">
|
||||
<span :style="{ color: getColorForId(slotProps.node.data.pid) }">
|
||||
{{ slotProps.node.data.pid }}
|
||||
</span>
|
||||
</template>
|
||||
</Column>
|
||||
<Column field="ppid" header="PPID" sortable>
|
||||
<template #body="slotProps">
|
||||
<span :style="{ color: getColorForId(slotProps.node.data.ppid) }">
|
||||
{{ slotProps.node.data.ppid }}
|
||||
</span>
|
||||
</template>
|
||||
</Column>
|
||||
</TreeTable>
|
||||
|
||||
<div
|
||||
v-if="tooltipVisible"
|
||||
class="fixed bg-gray-800 text-white p-3 border-round-sm z-5 max-w-50rem shadow-2"
|
||||
:style="tooltipStyle"
|
||||
>
|
||||
<div v-for="rule in currentNode.data.uniqueRules" :key="rule.name">
|
||||
• {{ rule.name }}
|
||||
<span class="font-italic"
|
||||
>({{ rule.matchCount }} {{ rule.scope }} {{ rule.matchCount > 1 ? "matches" : "match" }})</span
|
||||
>
|
||||
<LibraryTag v-if="rule.lib" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, computed, onMounted, onUnmounted } from "vue";
|
||||
import TreeTable from "primevue/treetable";
|
||||
import Column from "primevue/column";
|
||||
import LibraryTag from "@/components/misc/LibraryTag.vue";
|
||||
|
||||
const props = defineProps({
|
||||
data: {
|
||||
type: Object,
|
||||
required: true
|
||||
},
|
||||
showLibraryRules: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
}
|
||||
});
|
||||
|
||||
const filters = ref({});
|
||||
const expandedKeys = ref({});
|
||||
const tooltipVisible = ref(false);
|
||||
const currentNode = ref(null);
|
||||
const tooltipStyle = ref({
|
||||
position: "fixed",
|
||||
top: "0px",
|
||||
left: "0px"
|
||||
});
|
||||
|
||||
/**
 * Extract the parent/child process ids from a dynamic-analysis location.
 *
 * @param {Object|null} location - address record, e.g. { type, value: [ppid, pid] }
 * @returns {{ppid: number, pid: number}|null} null when the location is
 *          missing, has type "no address", or its value is not a
 *          [ppid, pid, ...] array
 */
const getProcessIds = (location) => {
    if (!location) return null;
    if (location.type === "no address") return null;

    const value = location.value;
    if (!Array.isArray(value) || value.length < 2) return null;

    const [ppid, pid] = value;
    return { ppid, pid };
};
|
||||
|
||||
// Build the TreeTable hierarchy: one node per process (keyed by PID),
// annotated with the unique rules that matched in it, then linked into a
// parent/child forest via PPID. Processes whose parent is unknown become roots.
const processTree = computed(() => {
    if (!props.data?.meta?.analysis?.layout?.processes) {
        console.error("Invalid data structure");
        return [];
    }

    const processes = props.data.meta.analysis.layout.processes;
    const rules = props.data.rules || {};
    const nodesByPid = new Map();

    // one tree node per process
    for (const process of processes) {
        const address = process.address;
        if (!address || !Array.isArray(address.value) || address.value.length < 2) {
            console.warn("Invalid process structure", process);
            continue;
        }
        const [ppid, pid] = address.value;
        nodesByPid.set(pid, {
            key: `process-${pid}`,
            data: {
                processname: process.name || "<Unknown Process>",
                pid,
                ppid,
                uniqueMatchCount: 0,
                // Map while accumulating; flattened to an array below
                uniqueRules: new Map()
            },
            children: []
        });
    }

    // attach each rule match to the process it occurred in
    for (const [ruleName, rule] of Object.entries(rules)) {
        if (!props.showLibraryRules && rule.meta && rule.meta.lib) continue;
        if (!Array.isArray(rule.matches)) continue;

        for (const match of rule.matches) {
            if (!Array.isArray(match) || match.length === 0) continue;

            const [location] = match;
            const ids = getProcessIds(location);
            if (!ids || !nodesByPid.has(ids.pid)) continue;

            const { data } = nodesByPid.get(ids.pid);
            if (!data.uniqueRules.has(ruleName)) {
                data.uniqueMatchCount++;
                data.uniqueRules.set(ruleName, {
                    name: ruleName,
                    lib: rule.meta && rule.meta.lib,
                    matchCount: 0,
                    scope: location.type
                });
            }
            data.uniqueRules.get(ruleName).matchCount++;
        }
    }

    // link children to parents; orphans become roots of the forest
    const roots = [];
    nodesByPid.forEach((node) => {
        node.data.uniqueRules = Array.from(node.data.uniqueRules.values());
        const parent = nodesByPid.get(node.data.ppid);
        if (parent) {
            parent.children.push(node);
        } else {
            roots.push(node);
        }
    });

    return roots;
});
|
||||
|
||||
// Deterministic per-id color: spreads ids around the hue wheel so that
// nearby PIDs get visibly different colors. Unknown ids render black.
const getColorForId = (id) => {
    if (id == null) return "black";

    const hue = Math.abs((id * 41) % 360);
    return `hsl(${hue}, 70%, 40%)`;
};
|
||||
|
||||
// Show the rule tooltip next to the cursor, but only for process nodes
// that actually have matched rules.
const showTooltip = (event, node) => {
    if (node.data.uniqueMatchCount <= 0) return;

    currentNode.value = node;
    tooltipVisible.value = true;
    updateTooltipPosition(event);
};

// Hide the tooltip and drop the node reference.
const hideTooltip = () => {
    tooltipVisible.value = false;
    currentNode.value = null;
};

// Place the tooltip slightly below/right of the cursor.
const updateTooltipPosition = ({ clientX, clientY }) => {
    const offset = 10;
    tooltipStyle.value = {
        position: "fixed",
        top: `${clientY + offset}px`,
        left: `${clientX + offset}px`
    };
};

// Follow the cursor while the tooltip is visible.
const handleMouseMove = (event) => {
    if (!tooltipVisible.value) return;
    updateTooltipPosition(event);
};

// track cursor movement for the lifetime of the component
onMounted(() => {
    document.addEventListener("mousemove", handleMouseMove);
});

onUnmounted(() => {
    document.removeEventListener("mousemove", handleMouseMove);
});
|
||||
</script>
|
||||
363
web/explorer/src/components/RuleMatchesTable.vue
Normal file
363
web/explorer/src/components/RuleMatchesTable.vue
Normal file
@@ -0,0 +1,363 @@
|
||||
<template>
|
||||
<TreeTable
|
||||
:value="filteredTreeData"
|
||||
v-model:expandedKeys="expandedKeys"
|
||||
size="small"
|
||||
scrollable
|
||||
:filters="filters"
|
||||
:filterMode="filterMode"
|
||||
sortField="namespace"
|
||||
:sortOrder="1"
|
||||
removableSort
|
||||
:rowHover="true"
|
||||
:indentation="1.3"
|
||||
selectionMode="single"
|
||||
@node-select="onNodeSelect"
|
||||
:pt="{
|
||||
row: ({ instance }) => ({
|
||||
oncontextmenu: (event) => onRightClick(event, instance)
|
||||
})
|
||||
}"
|
||||
>
|
||||
<template #header>
|
||||
<IconField>
|
||||
<InputIcon class="pi pi-search" />
|
||||
<InputText v-model="filters['global']" placeholder="Global search" />
|
||||
</IconField>
|
||||
</template>
|
||||
|
||||
<!-- Rule column -->
|
||||
<Column
|
||||
field="name"
|
||||
header="Rule"
|
||||
:sortable="true"
|
||||
:expander="true"
|
||||
filterMatchMode="contains"
|
||||
style="width: 38%"
|
||||
class="cursor-default"
|
||||
>
|
||||
<template #filter v-if="props.showColumnFilters">
|
||||
<InputText
|
||||
v-model="filters['name']"
|
||||
type="text"
|
||||
placeholder="Filter by rule or nested feature"
|
||||
class="w-full"
|
||||
/>
|
||||
</template>
|
||||
<template #body="{ node }">
|
||||
<RuleColumn :node="node" />
|
||||
</template>
|
||||
</Column>
|
||||
|
||||
<!-- Address/Process column -->
|
||||
<Column
|
||||
field="address"
|
||||
:header="props.data.meta.flavor === 'dynamic' ? 'Process' : 'Address'"
|
||||
filterMatchMode="contains"
|
||||
style="width: 8.5%"
|
||||
class="cursor-default"
|
||||
>
|
||||
<template #filter v-if="props.showColumnFilters">
|
||||
<InputText
|
||||
v-model="filters['address']"
|
||||
type="text"
|
||||
:placeholder="`Filter by ${props.data.meta.flavor === 'dynamic' ? 'process' : 'address'}`"
|
||||
class="w-full"
|
||||
/>
|
||||
</template>
|
||||
<template #body="{ node }">
|
||||
<span class="font-monospace text-sm"> {{ node.data.address }} </span>
|
||||
</template>
|
||||
</Column>
|
||||
|
||||
<!-- Namespace column -->
|
||||
<Column
|
||||
field="namespace"
|
||||
header="Namespace"
|
||||
sortable
|
||||
filterMatchMode="contains"
|
||||
style="width: 16%"
|
||||
class="cursor-default"
|
||||
>
|
||||
<template #filter v-if="props.showColumnFilters">
|
||||
<InputText v-model="filters['namespace']" type="text" placeholder="Filter by namespace" />
|
||||
</template>
|
||||
</Column>
|
||||
|
||||
<!-- Technique column -->
|
||||
<Column
|
||||
field="attack"
|
||||
header="ATT&CK Technique"
|
||||
sortable
|
||||
:sortField="(node) => node?.attack[0]?.technique"
|
||||
filterField="attack.0.parts"
|
||||
filterMatchMode="contains"
|
||||
style="width: 15%"
|
||||
>
|
||||
<template #filter v-if="props.showColumnFilters">
|
||||
<InputText
|
||||
v-model="filters['attack.0.parts']"
|
||||
type="text"
|
||||
placeholder="Filter by technique"
|
||||
class="w-full"
|
||||
/>
|
||||
</template>
|
||||
<template #body="{ node }">
|
||||
<div class="flex flex-wrap">
|
||||
<div v-for="(attack, index) in node.data.attack" :key="index">
|
||||
<a :href="createATTACKHref(attack)" target="_blank">
|
||||
{{ attack.technique }}
|
||||
<span class="text-500 text-sm font-normal ml-1">({{ attack.id.split(".")[0] }})</span>
|
||||
</a>
|
||||
<div v-if="attack.subtechnique" style="font-size: 0.8em; margin-left: 2em">
|
||||
<a :href="createATTACKHref(attack)" target="_blank">
|
||||
↳ {{ attack.subtechnique }}
|
||||
<span class="text-500 text-xs font-normal ml-1">({{ attack.id }})</span>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</Column>
|
||||
|
||||
<!-- MBC column -->
|
||||
<Column
|
||||
field="mbc"
|
||||
header="Malware Behavior Catalog"
|
||||
sortable
|
||||
:sortField="(node) => node?.mbc[0]?.parts[0]"
|
||||
filterField="mbc.0.parts"
|
||||
filterMatchMode="contains"
|
||||
>
|
||||
<template #filter v-if="props.showColumnFilters">
|
||||
<InputText v-model="filters['mbc.0.parts']" type="text" placeholder="Filter by MBC" class="w-full" />
|
||||
</template>
|
||||
<template #body="{ node }">
|
||||
<div class="flex flex-wrap">
|
||||
<div v-for="(mbc, index) in node.data.mbc" :key="index">
|
||||
<a :href="createMBCHref(mbc)" target="_blank">
|
||||
{{ mbc.parts.join("::") }}
|
||||
<span class="text-500 text-sm font-normal opacity-80 ml-1">[{{ mbc.id }}]</span>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</Column>
|
||||
</TreeTable>
|
||||
|
||||
<!-- Right click context menu -->
|
||||
<ContextMenu ref="menu" :model="contextMenuItems">
|
||||
<template #item="{ item, props }">
|
||||
<a v-ripple v-bind="props.action" :href="item.url" :target="item.target">
|
||||
<span v-if="item.icon !== 'vt-icon'" :class="item.icon" />
|
||||
<VTIcon v-else-if="item.icon === 'vt-icon'" />
|
||||
<span>{{ item.label }}</span>
|
||||
<i v-if="item.description" class="pi pi-info-circle text-xs" v-tooltip.right="item.description" />
|
||||
</a>
|
||||
</template>
|
||||
</ContextMenu>
|
||||
|
||||
<!-- Source code dialog -->
|
||||
<Dialog v-model:visible="sourceDialogVisible" style="width: 50vw">
|
||||
<highlightjs autodetect :code="currentSource" />
|
||||
</Dialog>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
// Used to highlight function calls in dynamic mode
|
||||
import "highlight.js/styles/stackoverflow-light.css";
|
||||
|
||||
import { ref, onMounted, computed } from "vue";
|
||||
import TreeTable from "primevue/treetable";
|
||||
import InputText from "primevue/inputtext";
|
||||
import Dialog from "primevue/dialog";
|
||||
import Column from "primevue/column";
|
||||
import IconField from "primevue/iconfield";
|
||||
import InputIcon from "primevue/inputicon";
|
||||
import ContextMenu from "primevue/contextmenu";
|
||||
|
||||
import RuleColumn from "@/components/columns/RuleColumn.vue";
|
||||
import VTIcon from "@/components/misc/VTIcon.vue";
|
||||
|
||||
import { parseRules } from "@/utils/rdocParser";
|
||||
import { createMBCHref, createATTACKHref, createCapaRulesUrl, createVirusTotalUrl } from "@/utils/urlHelpers";
|
||||
|
||||
const props = defineProps({
|
||||
data: {
|
||||
type: Object,
|
||||
required: true
|
||||
},
|
||||
showLibraryRules: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
},
|
||||
showColumnFilters: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
}
|
||||
});
|
||||
|
||||
const treeData = ref([]);
|
||||
|
||||
// The `filters` ref in the setup section is used by PrimeVue to maintain the overall filter
|
||||
// state of the table. Each column's filter contributes to this overall state.
|
||||
const filters = ref({});
|
||||
|
||||
const filterMode = ref("lenient");
|
||||
const sourceDialogVisible = ref(false);
|
||||
const currentSource = ref("");
|
||||
|
||||
// expandedKeys keeps track of the nodes that are expanded
|
||||
// for example, if a node with key "0" is expanded (and its first child is also expanded), expandedKeys will be { "0": true, "0-0": true }
|
||||
// if the entire tree is collapsed expandedKeys will be {}
|
||||
const expandedKeys = ref({});
|
||||
|
||||
// context menu element + the row that was last right-clicked
const menu = ref();
const selectedNode = ref({});

// Context-menu entries, recomputed whenever a new node is right-clicked.
const contextMenuItems = computed(() => {
    const node = selectedNode.value;

    const copyRuleName = {
        label: "Copy rule name",
        icon: "pi pi-copy",
        command: () => {
            navigator.clipboard.writeText(selectedNode.value.data?.name);
        }
    };

    const viewSource = {
        label: "View source",
        icon: "pi pi-eye",
        command: () => {
            showSource(selectedNode.value.data?.source);
        }
    };

    const openInCapaRules = {
        label: "View rule in capa-rules",
        icon: "pi pi-external-link",
        target: "_blank",
        url: createCapaRulesUrl(node, props.data.meta.version)
    };

    const lookupInVirusTotal = {
        label: "Lookup rule in VirusTotal",
        icon: "vt-icon",
        target: "_blank",
        description: "Requires VirusTotal Premium account",
        url: createVirusTotalUrl(node.data?.name)
    };

    return [copyRuleName, viewSource, openInCapaRules, lookupInVirusTotal];
});
|
||||
|
||||
/**
 * Row right-click handler: opens the context menu, but only for rows that
 * carry rule source (i.e. rules and `- match` features).
 *
 * @param {MouseEvent} event - the contextmenu event
 * @param {Object} instance - PrimeVue row instance carrying the tree node
 */
const onRightClick = (event, instance) => {
    // only rows with a source field get a context menu
    if (!instance.node.data.source) return;

    selectedNode.value = instance.node;

    // show the context menu (leftover debug console.log removed)
    menu.value.show(event);
};
|
||||
|
||||
/**
 * Row-selection handler implementing accordion-style expansion.
 *
 * Behavior:
 * - only "rule" and "match location" nodes react to selection
 * - clicking an already-expanded node collapses it
 * - clicking a rule collapses everything else, then expands the rule and
 *   its first match location
 * - clicking a match location keeps the parent rule expanded and expands
 *   only the clicked location (siblings collapse)
 *
 * @param {Object} node - the selected TreeTable node
 *
 * @example
 * // Expanding a rule node with key '3' results in
 * // expandedKeys.value === { '3': true, '3-0': true }
 */
const onNodeSelect = (node) => {
    const { key, data } = node;

    if (data.type !== "rule" && data.type !== "match location") return;

    // already expanded -> collapse it and stop
    if (expandedKeys.value[key]) {
        delete expandedKeys.value[key];
        return;
    }

    if (data.type === "rule") {
        // collapse everything else; expand this rule and its first match
        expandedKeys.value = { [key]: true, [`${key}-0`]: true };
    } else {
        // keep the parent rule open; expand only this match location
        const [parentKey] = key.split("-");
        expandedKeys.value = { [parentKey]: true, [key]: true };
    }
};
|
||||
|
||||
// Tree with library-rule nodes removed when showLibraryRules is off.
//
// BUGFIX: the previous implementation filtered `node.children` in place,
// destructively mutating treeData inside the computed. Once a library
// child was removed it was gone for good, so toggling "show library
// rules" back on could not restore it. This version builds filtered
// copies and leaves treeData untouched.
const filteredTreeData = computed(() => {
    if (props.showLibraryRules) {
        return treeData.value; // Return all data when showLibraryRules is true
    }

    // recursively copy the tree, dropping lib-rule nodes at every level
    const pruneLibNodes = (nodes) =>
        nodes
            .filter((node) => !(node.data && node.data.lib))
            .map((node) => (node.children ? { ...node, children: pruneLibNodes(node.children) } : node));

    return pruneLibNodes(treeData.value);
});
|
||||
|
||||
/**
 * Open the source-code dialog with the given rule source.
 *
 * @param {string} source - The rule source code to display.
 */
const showSource = (source) => {
    sourceDialogVisible.value = true;
    currentSource.value = source;
};

// parse the rdoc rules into TreeTable rows once the component is mounted
onMounted(() => {
    treeData.value = parseRules(props.data.rules, props.data.meta.flavor, props.data.meta.analysis.layout);
});
|
||||
</script>
|
||||
|
||||
<style scoped>
|
||||
/* Disable the toggle button for statement and features */
|
||||
:deep(
|
||||
.p-treetable-tbody
|
||||
> tr:not(:is([aria-level="1"], [aria-level="2"]))
|
||||
> td
|
||||
> div
|
||||
> .p-treetable-node-toggle-button
|
||||
) {
|
||||
visibility: hidden !important;
|
||||
height: 1.3rem;
|
||||
}
|
||||
|
||||
/* Make all matches nodes (i.e. not rule names) slightly smaller,
|
||||
and tighten up the spacing between the rows */
|
||||
:deep(.p-treetable-tbody > tr:not([aria-level="1"]) > td) {
|
||||
font-size: 0.95rem;
|
||||
padding: 0rem 0.5rem !important;
|
||||
}
|
||||
|
||||
/* Optional: Add a subtle background to root-level rows for better distinction */
|
||||
:deep(.p-treetable-tbody > tr[aria-level="1"]) {
|
||||
background-color: #f9f9f9;
|
||||
}
|
||||
</style>
|
||||
91
web/explorer/src/components/SettingsPanel.vue
Normal file
91
web/explorer/src/components/SettingsPanel.vue
Normal file
@@ -0,0 +1,91 @@
|
||||
<template>
|
||||
<Card>
|
||||
<template #content>
|
||||
<div class="flex align-items-center flex-wrap gap-3">
|
||||
<div class="flex flex-row align-items-center gap-2">
|
||||
<Checkbox
|
||||
v-model="showCapabilitiesByFunctionOrProcess"
|
||||
inputId="showCapabilitiesByFunctionOrProcess"
|
||||
:binary="true"
|
||||
:disabled="showNamespaceChart"
|
||||
/>
|
||||
<label for="showCapabilitiesByFunctionOrProcess">{{ capabilitiesLabel }}</label>
|
||||
</div>
|
||||
<div class="flex flex-row align-items-center gap-2">
|
||||
<Checkbox
|
||||
v-model="showLibraryRules"
|
||||
inputId="showLibraryRules"
|
||||
:binary="true"
|
||||
:disabled="showNamespaceChart || libraryRuleMatchesCount === 0"
|
||||
/>
|
||||
<label for="showLibraryRules">
|
||||
<span v-if="libraryRuleMatchesCount > 1">
|
||||
Show {{ libraryRuleMatchesCount }} distinct library rules
|
||||
</span>
|
||||
<span v-else-if="libraryRuleMatchesCount === 1">Show 1 distinct library rule</span>
|
||||
<span v-else>No library rules matched</span>
|
||||
</label>
|
||||
</div>
|
||||
<div class="flex flex-row align-items-center gap-2">
|
||||
<Checkbox v-model="showNamespaceChart" inputId="showNamespaceChart" :binary="true" />
|
||||
<label for="showNamespaceChart"> Show namespace chart </label>
|
||||
</div>
|
||||
<div class="flex flex-row align-items-center gap-2">
|
||||
<Checkbox
|
||||
v-model="showColumnFilters"
|
||||
inputId="showColumnFilters"
|
||||
:binary="true"
|
||||
:disabled="showNamespaceChart"
|
||||
/>
|
||||
<label for="showColumnFilters"> Show column filters </label>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</Card>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, watch } from "vue";
|
||||
import Checkbox from "primevue/checkbox";
|
||||
|
||||
const props = defineProps({
|
||||
flavor: {
|
||||
type: String,
|
||||
required: true
|
||||
},
|
||||
libraryRuleMatchesCount: {
|
||||
type: Number,
|
||||
required: true
|
||||
}
|
||||
});
|
||||
|
||||
// local checkbox state, mirrored to the parent via update:* events below
const showCapabilitiesByFunctionOrProcess = ref(false);
const showLibraryRules = ref(false);
const showNamespaceChart = ref(false);
const showColumnFilters = ref(false);

const emit = defineEmits([
    "update:show-capabilities-by-function-or-process",
    "update:show-library-rules",
    "update:show-namespace-chart",
    "update:show-column-filters"
]);

// flavor is fixed for a loaded report, so a plain const label is enough
const capabilitiesLabel = props.flavor === "static" ? "Show capabilities by function" : "Show capabilities by process";

// forward every checkbox change to the parent as its update:* event
const settingEvents = [
    [showCapabilitiesByFunctionOrProcess, "update:show-capabilities-by-function-or-process"],
    [showLibraryRules, "update:show-library-rules"],
    [showNamespaceChart, "update:show-namespace-chart"],
    [showColumnFilters, "update:show-column-filters"]
];
for (const [setting, eventName] of settingEvents) {
    watch(setting, (newValue) => {
        emit(eventName, newValue);
    });
}
|
||||
</script>
|
||||
91
web/explorer/src/components/UploadOptions.vue
Normal file
91
web/explorer/src/components/UploadOptions.vue
Normal file
@@ -0,0 +1,91 @@
|
||||
<template>
    <Card>
        <template #content>
            <div class="flex flex-wrap align-items-center justify-content-center gap-3">
                <!-- option 1: upload a local .json / .gz rdoc (max-file-size is 10 MB) -->
                <div class="flex-grow-1 flex align-items-center justify-content-center">
                    <FileUpload
                        mode="basic"
                        name="model[]"
                        accept=".json,.gz"
                        :max-file-size="10000000"
                        :auto="true"
                        :custom-upload="true"
                        choose-label="Upload from local"
                        @uploader="$emit('load-from-local', $event)"
                    />
                </div>

                <!-- each pair of dividers renders vertical on desktop and
                     horizontal on mobile; see the scoped media queries below -->
                <Divider layout="vertical" class="hidden-mobile">
                    <b>OR</b>
                </Divider>
                <Divider layout="horizontal" class="visible-mobile" align="center">
                    <b>OR</b>
                </Divider>

                <!-- option 2: load an rdoc from a URL (button disabled while input is empty) -->
                <div class="flex-grow-1 flex align-items-center justify-content-center gap-2">
                    <FloatLabel>
                        <InputText id="url" type="text" v-model="loadURL" />
                        <label for="url">Load from URL</label>
                    </FloatLabel>
                    <Button icon="pi pi-arrow-right" @click="$emit('load-from-url', loadURL)" :disabled="!loadURL" />
                </div>

                <Divider layout="vertical" class="hidden-mobile">
                    <b>OR</b>
                </Divider>
                <Divider layout="horizontal" class="visible-mobile" align="center">
                    <b>OR</b>
                </Divider>

                <!-- option 3: request the static-analysis demo (handled by parent) -->
                <div class="flex-grow-1 flex align-items-center justify-content-center">
                    <Button label="Preview Static" @click="$emit('load-demo-static')" class="p-button" />
                </div>

                <Divider layout="vertical" class="hidden-mobile">
                    <b>OR</b>
                </Divider>
                <Divider layout="horizontal" class="visible-mobile" align="center">
                    <b>OR</b>
                </Divider>

                <!-- option 4: request the dynamic-analysis demo (handled by parent) -->
                <div class="flex-grow-1 flex align-items-center justify-content-center">
                    <Button label="Preview Dynamic" @click="$emit('load-demo-dynamic')" class="p-button" />
                </div>
            </div>
        </template>
    </Card>
</template>
|
||||
|
||||
<script setup>
import { ref } from "vue";
import Card from "primevue/card";
import FileUpload from "primevue/fileupload";
import Divider from "primevue/divider";
import FloatLabel from "primevue/floatlabel";
import InputText from "primevue/inputtext";
import Button from "primevue/button";

// URL typed by the user; emitted verbatim with the "load-from-url" event.
const loadURL = ref("");

// This component only collects input; actual loading is delegated to the
// parent via these events.
defineEmits(["load-from-local", "load-from-url", "load-demo-static", "load-demo-dynamic"]);
</script>
|
||||
|
||||
<style scoped>
/* Desktop (>= 769px): show the vertical dividers, hide the horizontal ones. */
@media screen and (min-width: 769px) {
    .hidden-mobile {
        display: flex !important;
    }
    .visible-mobile {
        display: none !important;
    }
}

/* Mobile (<= 768px): the flex row wraps, so swap to horizontal dividers. */
@media screen and (max-width: 768px) {
    .hidden-mobile {
        display: none !important;
    }
    .visible-mobile {
        display: flex !important;
    }
}
</style>
|
||||
82
web/explorer/src/components/columns/RuleColumn.vue
Normal file
82
web/explorer/src/components/columns/RuleColumn.vue
Normal file
@@ -0,0 +1,82 @@
|
||||
<template>
    <!-- renders a single node of the rule match tree; the branch taken depends on node.data.type -->
    <div class="cursor-default">
        <!-- example node: "parse PE headers (2 matches) lib" -->
        <template v-if="node.data.type === 'rule'">
            <div>
                <span>{{ node.data.name }}</span>
                <!-- match count is only shown when there is more than one match -->
                <span v-if="node.data.matchCount > 1" class="font-italic"> ({{ node.data.matchCount }} matches) </span>
                <LibraryTag v-if="node.data.lib && node.data.matchCount" />
            </div>
        </template>

        <!-- example node: "basic block @ 0x401000" or "explorer.exe" -->
        <template v-else-if="node.data.type === 'match location'">
            <span class="text-sm font-italic">{{ node.data.name }}</span>
        </template>

        <!-- example node: "- or", "- and" -->
        <template v-else-if="node.data.type === 'statement'"
            >-
            <span
                :class="{
                    'text-green-700': node.data.typeValue === 'range',
                    'font-semibold': node.data.typeValue !== 'range'
                }"
            >
                {{ node.data.name }}
            </span>
        </template>

        <!-- example node: "- api: GetProcAddress", "- regex: .*\\.exe" -->
        <template v-else-if="node.data.type === 'feature'">
            <span>
                - {{ node.data.typeValue }}:
                <!-- hex number/offset features get a tooltip showing the decimal value -->
                <span
                    :class="{ 'text-green-700': node.data.typeValue !== 'regex' }"
                    class="font-monospace"
                    v-tooltip.top="{
                        value: getTooltipContent(node.data),
                        showDelay: 1000,
                        hideDelay: 300
                    }"
                >
                    {{ node.data.name }}
                </span>
            </span>
        </template>

        <!-- example node: "- malware.exe" (these are the captures (i.e. children nodes) of regex nodes) -->
        <template v-else-if="node.data.type === 'regex-capture'">
            - <span class="text-green-700 font-monospace">{{ node.data.name }}</span>
        </template>

        <!-- example node: "exit(0) -> 0" (if the node type is call-info, we highlight node.data.name.callInfo) -->
        <template v-else-if="node.data.type === 'call-info'">
            <highlightjs lang="c" :code="node.data.name.callInfo" />
        </template>

        <!-- example node: " = IMAGE_NT_SIGNATURE (PE)" -->
        <!-- note: v-if (not v-else-if) — the description is appended after whichever branch rendered above -->
        <span v-if="node.data.description" class="text-gray-500 text-sm" style="font-size: 90%">
            = {{ node.data.description }}
        </span>
    </div>
</template>
|
||||
|
||||
<script setup>
import LibraryTag from "@/components/misc/LibraryTag.vue";

defineProps({
    // parsed rule-tree node (see rdocParser) rendered by this column
    node: {
        type: Object,
        required: true
    }
});

/**
 * Builds tooltip text for a feature node.
 * Number and offset features are displayed as hex strings, so the tooltip
 * shows the decimal equivalent.
 * @param {Object} data - the node.data of a feature node
 * @returns {?string} tooltip text, or null when no tooltip applies
 */
const getTooltipContent = (data) => {
    if (data.typeValue === "number" || data.typeValue === "offset") {
        const decimalValue = Number.parseInt(data.name, 16);
        // guard against names that are not valid hex, which previously
        // produced a "Decimal: NaN" tooltip
        if (Number.isNaN(decimalValue)) {
            return null;
        }
        return `Decimal: ${decimalValue}`;
    }
    return null;
};
</script>
|
||||
13
web/explorer/src/components/misc/LibraryTag.vue
Normal file
13
web/explorer/src/components/misc/LibraryTag.vue
Normal file
@@ -0,0 +1,13 @@
|
||||
<template>
    <!-- small "lib" badge shown next to rules flagged as library rules -->
    <Tag
        class="ml-2"
        style="scale: 0.8"
        value="lib"
        severity="info"
        v-tooltip.right="'Library rules capture common logic'"
    />
</template>

<script setup>
import Tag from "primevue/tag";
</script>
|
||||
5
web/explorer/src/components/misc/VTIcon.vue
Normal file
5
web/explorer/src/components/misc/VTIcon.vue
Normal file
@@ -0,0 +1,5 @@
|
||||
<template>
    <!-- inline SVG icon (VirusTotal mark, judging by the component name — confirm);
         fill uses currentColor so it inherits the surrounding text color -->
    <svg width="14" height="14" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg">
        <path d="M10.87 12L0 22.68h24V1.32H0zm10.73 8.52H5.28l8.637-8.448L5.28 3.48H21.6z" fill="currentColor" />
    </svg>
</template>
|
||||
88
web/explorer/src/composables/useRdocLoader.js
Normal file
88
web/explorer/src/composables/useRdocLoader.js
Normal file
@@ -0,0 +1,88 @@
|
||||
import { useToast } from "primevue/usetoast";
import { isGzipped, decompressGzip, readFileAsText } from "@/utils/fileUtils";

export function useRdocLoader() {
    const toast = useToast();
    const MIN_SUPPORTED_VERSION = "7.0.0";

    /**
     * Shows a toast notification in the bottom-center group.
     * @param {string} severity - e.g. "success" or "error"
     * @param {string} summary - toast title
     * @param {string} detail - toast body
     * @param {number} [life=3000] - display duration in ms
     */
    const notify = (severity, summary, detail, life = 3000) => {
        toast.add({ severity, summary, detail, life, group: "bc" /* bottom-center */ });
    };

    /**
     * Compares two dotted numeric version strings segment by segment.
     * A plain relational comparison of strings is lexicographic and would
     * order "10.0.0" before "7.0.0", wrongly rejecting newer versions.
     * @param {string} a
     * @param {string} b
     * @returns {number} negative if a < b, 0 if equal, positive if a > b
     */
    const compareVersions = (a, b) => {
        // non-numeric or missing segments are treated as 0
        const parse = (v) =>
            String(v)
                .split(".")
                .map((part) => Number.parseInt(part, 10) || 0);
        const as = parse(a);
        const bs = parse(b);
        const length = Math.max(as.length, bs.length);
        for (let i = 0; i < length; i++) {
            const diff = (as[i] ?? 0) - (bs[i] ?? 0);
            if (diff !== 0) return diff;
        }
        return 0;
    };

    /**
     * Checks if the loaded rdoc version is supported
     * @param {Object} rdoc - The loaded JSON rdoc data
     * @returns {boolean} - True if version is supported, false otherwise
     */
    const checkVersion = (rdoc) => {
        const version = rdoc.meta.version;
        if (compareVersions(version, MIN_SUPPORTED_VERSION) < 0) {
            const message = `Version ${version} is not supported. Please use version ${MIN_SUPPORTED_VERSION} or higher.`;
            console.error(message);
            notify("error", "Unsupported Version", message, 5000);
            return false;
        }
        return true;
    };

    /**
     * Loads JSON rdoc data from various sources
     * @param {File|string|Object} source - File object, URL string, or JSON object
     * @returns {Promise<?Object>} - the parsed rdoc on success, null on failure
     */
    const loadRdoc = async (source) => {
        try {
            let data;

            if (typeof source === "string") {
                // Load from URL
                const response = await fetch(source);
                if (!response.ok) {
                    throw new Error(`HTTP error! status: ${response.status}`);
                }
                data = await response.json();
            } else if (source instanceof File) {
                // Local file: transparently handle gzip-compressed rdocs
                let fileContent;
                if (await isGzipped(source)) {
                    fileContent = await decompressGzip(source);
                } else {
                    fileContent = await readFileAsText(source);
                }
                data = JSON.parse(fileContent);
            } else if (typeof source === "object") {
                // Direct JSON object (Preview options)
                data = source;
            } else {
                throw new Error("Invalid source type");
            }

            if (checkVersion(data)) {
                notify("success", "Success", "JSON data loaded successfully");
                return data;
            }
        } catch (error) {
            console.error("Error loading JSON:", error);
            notify(
                "error",
                "Error",
                "Failed to process the file. Please ensure it's a valid JSON or gzipped JSON file."
            );
        }
        return null;
    };

    return {
        loadRdoc
    };
}
|
||||
88
web/explorer/src/main.js
Normal file
88
web/explorer/src/main.js
Normal file
@@ -0,0 +1,88 @@
|
||||
// Application entry point: global styles, PrimeVue theme, plugin and
// component registration, then mount.
import "primeicons/primeicons.css";
import "./assets/main.css";

import "highlight.js/styles/default.css";
import "primeflex/primeflex.css";
import "primeflex/themes/primeone-light.css";

// registers highlight.js' common language set for the <highlightjs> component
import "highlight.js/lib/common";
import hljsVuePlugin from "@highlightjs/vue-plugin";

import { createApp } from "vue";
import PrimeVue from "primevue/config";
import Ripple from "primevue/ripple";
import Aura from "@primevue/themes/aura";
import App from "./App.vue";
import MenuBar from "primevue/menubar";
import Card from "primevue/card";
import Panel from "primevue/panel";
import Column from "primevue/column";
import Checkbox from "primevue/checkbox";
import FloatLabel from "primevue/floatlabel";
import Tooltip from "primevue/tooltip";
import Divider from "primevue/divider";
import ContextMenu from "primevue/contextmenu";
import ToastService from "primevue/toastservice";
import Toast from "primevue/toast";
import router from "./router";

import { definePreset } from "@primevue/themes";

// "Noir" preset: the Aura theme with a monochrome zinc/slate primary palette.
const Noir = definePreset(Aura, {
    semantic: {
        primary: {
            50: "{zinc.50}",
            100: "{zinc.100}",
            200: "{zinc.200}",
            300: "{zinc.300}",
            400: "{zinc.400}",
            500: "{zinc.500}",
            600: "{zinc.600}",
            700: "{zinc.700}",
            800: "{zinc.800}",
            900: "{zinc.900}",
            950: "{zinc.950}"
        },
        colorScheme: {
            light: {
                primary: {
                    color: "{slate.800}",
                    inverseColor: "#ffffff",
                    hoverColor: "{zinc.900}",
                    activeColor: "{zinc.800}"
                }
            }
        }
    }
});

const app = createApp(App);

app.use(router);
app.use(hljsVuePlugin);

app.use(PrimeVue, {
    theme: {
        preset: Noir,
        options: {
            // darkModeSelector set to "light" — presumably to pin the light
            // color scheme; confirm against PrimeVue theming docs
            darkModeSelector: "light"
        }
    },
    ripple: true
});
app.use(ToastService);

// global directives
app.directive("tooltip", Tooltip);
app.directive("ripple", Ripple);

// globally registered components (usable in any template without imports)
app.component("Card", Card);
app.component("Divider", Divider);
app.component("Toast", Toast);
app.component("Panel", Panel);
app.component("MenuBar", MenuBar);
app.component("Checkbox", Checkbox);
app.component("FloatLabel", FloatLabel);
app.component("Column", Column);
app.component("ContextMenu", ContextMenu);

app.mount("#app");
|
||||
39
web/explorer/src/router/index.js
Normal file
39
web/explorer/src/router/index.js
Normal file
@@ -0,0 +1,39 @@
|
||||
import { createRouter, createWebHashHistory } from "vue-router";
import ImportView from "@/views/ImportView.vue";
import NotFoundView from "@/views/NotFoundView.vue";
import AnalysisView from "@/views/AnalysisView.vue";

import { rdocStore } from "@/store/rdocStore";

/**
 * Navigation guard for the analysis page: it is only reachable once an rdoc
 * has been loaded into the store; otherwise the user is sent back to the
 * import (home) page.
 */
const requireRdoc = (to, from, next) => {
    if (rdocStore.data.value === null) {
        // No rdoc loaded, redirect to home page
        next({ name: "home" });
        return;
    }
    // rdoc is loaded, proceed to analysis page
    next();
};

const routes = [
    {
        path: "/",
        name: "home",
        component: ImportView
    },
    {
        path: "/analysis",
        name: "analysis",
        component: AnalysisView,
        beforeEnter: requireRdoc
    },
    // 404 Route - This should be the last route
    {
        path: "/:pathMatch(.*)*",
        name: "NotFound",
        component: NotFoundView
    }
];

const router = createRouter({
    history: createWebHashHistory(import.meta.env.BASE_URL),
    routes
});

export default router;
|
||||
11
web/explorer/src/store/rdocStore.js
Normal file
11
web/explorer/src/store/rdocStore.js
Normal file
@@ -0,0 +1,11 @@
|
||||
import { ref } from "vue";

// Module-level reactive holder for the currently loaded rdoc; shared by the
// router guard and the views.
const data = ref(null);

export const rdocStore = {
    data,
    // replace the stored rdoc
    setData(newData) {
        data.value = newData;
    },
    // drop the stored rdoc (e.g. when returning to the import page)
    clearData() {
        data.value = null;
    }
};
|
||||
286
web/explorer/src/tests/rdocParser.test.js
Normal file
286
web/explorer/src/tests/rdocParser.test.js
Normal file
@@ -0,0 +1,286 @@
|
||||
import { describe, it, expect } from "vitest";
import { parseRules, parseFunctionCapabilities } from "../utils/rdocParser";

describe("parseRules", () => {
    it("should return an empty array for empty rules", () => {
        const rules = {};
        const flavor = "static";
        const layout = {};
        const result = parseRules(rules, flavor, layout);
        expect(result).toEqual([]);
    });

    it("should correctly parse a simple rule with static scope", () => {
        // one function-scoped rule with a single match containing one "api" feature
        const rules = {
            "test rule": {
                meta: {
                    name: "test rule",
                    namespace: "test",
                    lib: false,
                    scopes: {
                        static: "function",
                        dynamic: "process"
                    }
                },
                source: "test rule source",
                matches: [
                    [
                        { type: "absolute", value: 0x1000 },
                        {
                            success: true,
                            node: { type: "feature", feature: { type: "api", api: "TestAPI" } },
                            children: [],
                            locations: [{ type: "absolute", value: 0x1000 }],
                            captures: {}
                        }
                    ]
                ]
            }
        };
        const result = parseRules(rules, "static", {});
        expect(result).toHaveLength(1);
        // root node describes the rule itself
        expect(result[0].key).toBe("0");
        expect(result[0].data.type).toBe("rule");
        expect(result[0].data.name).toBe("test rule");
        expect(result[0].data.lib).toBe(false);
        expect(result[0].data.namespace).toBe("test");
        expect(result[0].data.source).toBe("test rule source");
        // non-file scope: an intermediate "match location" node wraps the feature node
        expect(result[0].children).toHaveLength(1);
        expect(result[0].children[0].key).toBe("0-0");
        expect(result[0].children[0].data.type).toBe("match location");
        expect(result[0].children[0].children[0].data.type).toBe("feature");
        expect(result[0].children[0].children[0].data.typeValue).toBe("api");
        expect(result[0].children[0].children[0].data.name).toBe("TestAPI");
    });

    it('should handle rule with "not" statements correctly', () => {
        // the failed (success: false) child of a "not" statement is still
        // rendered beneath the statement node
        const rules = {
            "test rule": {
                meta: {
                    name: "test rule",
                    namespace: "test",
                    lib: false,
                    scopes: {
                        static: "function",
                        dynamic: "process"
                    }
                },
                source: "test rule source",
                matches: [
                    [
                        { type: "absolute", value: 0x1000 },
                        {
                            success: true,
                            node: { type: "statement", statement: { type: "not" } },
                            children: [
                                { success: false, node: { type: "feature", feature: { type: "api", api: "TestAPI" } } }
                            ]
                        }
                    ]
                ]
            }
        };
        const result = parseRules(rules, "static", {});
        expect(result).toHaveLength(1);
        expect(result[0].children[0].children[0].data.type).toBe("statement");
        expect(result[0].children[0].children[0].data.name).toBe("not:");
        expect(result[0].children[0].children[0].children[0].data.type).toBe("feature");
        expect(result[0].children[0].children[0].children[0].data.typeValue).toBe("api");
        expect(result[0].children[0].children[0].children[0].data.name).toBe("TestAPI");
    });
});
|
||||
|
||||
describe("parseFunctionCapabilities", () => {
    it("should return an empty array when no functions match", () => {
        const mockData = {
            meta: {
                analysis: {
                    feature_counts: {
                        file: 0,
                        functions: []
                    },
                    layout: {
                        functions: []
                    }
                }
            },
            rules: {}
        };
        const result = parseFunctionCapabilities(mockData, false);
        expect(result).toEqual([]);
    });

    it("should parse a single function with one rule match", () => {
        // one function at 0x1000 matched by one function-scoped rule
        const mockDoc = {
            meta: {
                analysis: {
                    layout: {
                        functions: [
                            {
                                address: { type: "absolute", value: 0x1000 },
                                matched_basic_blocks: [{ address: { type: "absolute", value: 0x1000 } }]
                            }
                        ]
                    },
                    feature_counts: {
                        functions: [{ address: { type: "absolute", value: 0x1000 } }]
                    }
                }
            },
            rules: {
                rule1: {
                    meta: {
                        name: "Test Rule",
                        namespace: "test",
                        lib: false,
                        scopes: { static: "function" }
                    },
                    matches: [[{ type: "absolute", value: 0x1000 }]]
                }
            }
        };
        const result = parseFunctionCapabilities(mockDoc);
        expect(result).toEqual([
            {
                address: "0x1000",
                capabilities: [{ name: "Test Rule", namespace: "test", lib: false }]
            }
        ]);
    });

    it("should handle multiple rules matching a single function", () => {
        const mockDoc = {
            meta: {
                analysis: {
                    layout: {
                        functions: [
                            {
                                address: { type: "absolute", value: 0x1000 },
                                matched_basic_blocks: [{ address: { type: "absolute", value: 0x1000 } }]
                            }
                        ]
                    },
                    feature_counts: {
                        functions: [{ address: { type: "absolute", value: 0x1000 } }]
                    }
                }
            },
            rules: {
                rule1: {
                    meta: {
                        name: "Test Rule 1",
                        lib: true,
                        scopes: { static: "function" }
                    },
                    matches: [[{ type: "absolute", value: 0x1000 }]]
                },
                rule2: {
                    meta: {
                        name: "Test Rule 2",
                        namespace: "test",
                        lib: false,
                        scopes: { static: "function" }
                    },
                    matches: [[{ type: "absolute", value: 0x1000 }]]
                }
            }
        };
        const result = parseFunctionCapabilities(mockDoc);
        expect(result).toEqual([
            {
                address: "0x1000",
                capabilities: [
                    { name: "Test Rule 1", lib: true },
                    { name: "Test Rule 2", namespace: "test", lib: false }
                ]
            }
        ]);
    });

    it("should handle basic block scoped rules", () => {
        // the basic block at 0x1100 belongs to the function at 0x1000, so the
        // match is attributed to the enclosing function
        const mockDoc = {
            meta: {
                analysis: {
                    layout: {
                        functions: [
                            {
                                address: { type: "absolute", value: 0x1000 },
                                matched_basic_blocks: [{ address: { type: "absolute", value: 0x1100 } }]
                            }
                        ]
                    },
                    feature_counts: {
                        functions: [{ address: { type: "absolute", value: 0x1000 } }]
                    }
                }
            },
            rules: {
                rule1: {
                    meta: {
                        name: "Basic Block Rule",
                        namespace: "test",
                        lib: false,
                        scopes: { static: "basic block" }
                    },
                    matches: [[{ type: "absolute", value: 0x1100 }]]
                }
            }
        };
        const result = parseFunctionCapabilities(mockDoc);
        expect(result).toEqual([
            {
                address: "0x1000",
                capabilities: [{ name: "Basic Block Rule", namespace: "test", lib: false }]
            }
        ]);
    });

    it("should handle a single rule matching in multiple functions", () => {
        const mockDoc = {
            meta: {
                analysis: {
                    layout: {
                        functions: [
                            {
                                address: { type: "absolute", value: 0x1000 },
                                matched_basic_blocks: [{ address: { type: "absolute", value: 0x1000 } }]
                            },
                            {
                                address: { type: "absolute", value: 0x2000 },
                                matched_basic_blocks: [{ address: { type: "absolute", value: 0x2000 } }]
                            }
                        ]
                    },
                    feature_counts: {
                        functions: [
                            { address: { type: "absolute", value: 0x1000 } },
                            { address: { type: "absolute", value: 0x2000 } }
                        ]
                    }
                }
            },
            rules: {
                rule1: {
                    meta: {
                        name: "Test Rule",
                        namespace: "test",
                        lib: false,
                        scopes: { static: "function" }
                    },
                    matches: [[{ type: "absolute", value: 0x1000 }], [{ type: "absolute", value: 0x2000 }]]
                }
            }
        };
        const result = parseFunctionCapabilities(mockDoc);
        expect(result).toEqual([
            {
                address: "0x1000",
                capabilities: [{ name: "Test Rule", namespace: "test", lib: false }]
            },
            {
                address: "0x2000",
                capabilities: [{ name: "Test Rule", namespace: "test", lib: false }]
            }
        ]);
    });
});
|
||||
38
web/explorer/src/utils/fileUtils.js
Normal file
38
web/explorer/src/utils/fileUtils.js
Normal file
@@ -0,0 +1,38 @@
|
||||
import pako from "pako";
|
||||
|
||||
/**
|
||||
* Checks if the given file is gzipped
|
||||
* @param {File} file - The file to check
|
||||
* @returns {Promise<boolean>} - True if the file is gzipped, false otherwise
|
||||
*/
|
||||
export const isGzipped = async (file) => {
|
||||
const arrayBuffer = await file.arrayBuffer();
|
||||
const uint8Array = new Uint8Array(arrayBuffer);
|
||||
return uint8Array[0] === 0x1f && uint8Array[1] === 0x8b;
|
||||
};
|
||||
|
||||
/**
|
||||
* Decompresses a gzipped file
|
||||
* @param {File} file - The gzipped file to decompress
|
||||
* @returns {Promise<string>} - The decompressed file content as a string
|
||||
*/
|
||||
export const decompressGzip = async (file) => {
|
||||
const arrayBuffer = await file.arrayBuffer();
|
||||
const uint8Array = new Uint8Array(arrayBuffer);
|
||||
const decompressed = pako.inflate(uint8Array, { to: "string" });
|
||||
return decompressed;
|
||||
};
|
||||
|
||||
/**
|
||||
* Reads a file as text
|
||||
* @param {File} file - The file to read
|
||||
* @returns {Promise<string>} - The file content as a string
|
||||
*/
|
||||
export const readFileAsText = (file) => {
|
||||
return new Promise((resolve, reject) => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = (event) => resolve(event.target.result);
|
||||
reader.onerror = (error) => reject(error);
|
||||
reader.readAsText(file);
|
||||
});
|
||||
};
|
||||
627
web/explorer/src/utils/rdocParser.js
Normal file
627
web/explorer/src/utils/rdocParser.js
Normal file
@@ -0,0 +1,627 @@
|
||||
/**
 * Parses rules data for the CapaTreeTable component
 * @param {Object} rules - The rules object from the rdoc JSON data
 * @param {string} flavor - The flavor of the analysis (static or dynamic)
 * @param {Object} layout - The layout object from the rdoc JSON data
 * @param {number} [maxMatches=1] - Maximum number of matches to parse per rule
 * @returns {Array} - Parsed tree data for the TreeTable component
 */
export function parseRules(rules, flavor, layout, maxMatches = 1) {
    return Object.entries(rules).map(([, rule], index) => {
        const ruleNode = {
            key: `${index}`,
            data: {
                type: "rule",
                name: rule.meta.name,
                lib: rule.meta.lib,
                matchCount: rule.matches.length,
                namespace: rule.meta.namespace,
                mbc: rule.meta.mbc,
                source: rule.source,
                attack: rule.meta.attack
            }
        };

        // Limit the number of matches to process
        // Dynamic matches can have thousands of matches, only show `maxMatches` for performance reasons
        const limitedMatches = flavor === "dynamic" ? rule.matches.slice(0, maxMatches) : rule.matches;

        // Is this a static rule with a file-level scope?
        const isFileScope = rule.meta.scopes && rule.meta.scopes.static === "file";

        if (isFileScope) {
            // The scope for the rule is a file, so we don't need to show the match location address
            ruleNode.children = limitedMatches.map((match, matchIndex) => {
                return parseNode(match[1], `${index}-${matchIndex}`, rules, rule.meta.lib, layout);
            });
        } else {
            // This is not a file-level match scope, we need to create intermediate nodes for each match
            ruleNode.children = limitedMatches.map((match, matchIndex) => {
                const matchKey = `${index}-${matchIndex}`;
                const matchNode = {
                    key: matchKey,
                    data: {
                        type: "match location",
                        name:
                            flavor === "static"
                                ? `${rule.meta.scopes.static} @ ` + formatAddress(match[0])
                                : getProcessName(layout, match[0])
                    },
                    children: [parseNode(match[1], `${matchKey}`, rules, rule.meta.lib, layout)]
                };
                return matchNode;
            });
        }

        // Finally, add a note if there are more matches than the limit (only applicable in dynamic mode)
        if (rule.matches.length > limitedMatches.length) {
            ruleNode.children.push({
                // use a key distinct from the rule node's own key (`${index}`)
                // so TreeTable row keys stay unique
                key: `${index}-more`,
                data: {
                    type: "match location",
                    name: `... and ${rule.matches.length - maxMatches} more matches`
                }
            });
        }

        return ruleNode;
    });
}
|
||||
|
||||
/**
 * Parses the capabilities of functions from a given rdoc.
 *
 * @param {Object} doc - The document containing function and rule information.
 * @returns {Array} An array of objects, each representing a function with its address and capabilities.
 *
 * @example
 * [
 *   {
 *     "address": "0x14002A690",
 *     "capabilities": [
 *       { "name": "contain loop", "lib": true },
 *       {
 *         "name": "get disk information",
 *         "namespace": "host-interaction/hardware/storage",
 *         "lib": false
 *       }
 *     ]
 *   }
 * ]
 */
export function parseFunctionCapabilities(doc) {
    // Map basic blocks to their parent functions
    const functionsByBB = new Map();

    for (const finfo of doc.meta.analysis.layout.functions) {
        const faddress = finfo.address;
        for (const bb of finfo.matched_basic_blocks) {
            functionsByBB.set(formatAddress(bb.address), formatAddress(faddress));
        }
    }

    // Capabilities per function, keyed by formatted address. The inner Map is
    // keyed by rule name so each rule is recorded at most once per function —
    // a Set of object literals would never deduplicate, since each literal is
    // a distinct reference.
    const matchesByFunction = new Map();

    // Add a special entry for file-level matches
    matchesByFunction.set("file", new Map());

    // Record one capability entry for `rule` under the function/file `addr`.
    const addMatch = (addr, rule) => {
        if (!matchesByFunction.has(addr)) {
            matchesByFunction.set(addr, new Map());
        }
        matchesByFunction.get(addr).set(rule.meta.name, {
            name: rule.meta.name,
            namespace: rule.meta.namespace,
            lib: rule.meta.lib
        });
    };

    // Iterate through all rules in the document
    for (const [, rule] of Object.entries(doc.rules)) {
        if (rule.meta.scopes.static === "function") {
            // function-scoped: the match address is the function itself
            for (const [address] of rule.matches) {
                addMatch(formatAddress(address), rule);
            }
        } else if (rule.meta.scopes.static === "basic block") {
            // basic-block-scoped: attribute the match to the enclosing function
            for (const [address] of rule.matches) {
                const function_ = functionsByBB.get(formatAddress(address));
                if (function_) {
                    addMatch(function_, rule);
                }
            }
        } else if (rule.meta.scopes.static === "file") {
            // Add file-level matches to the special 'file' entry
            addMatch("file", rule);
        }
    }

    const result = [];

    // Add file-level matches if there are any
    if (matchesByFunction.get("file").size > 0) {
        result.push({
            address: "file",
            capabilities: Array.from(matchesByFunction.get("file").values())
        });
    }

    // Iterate through all functions in the document
    for (const f of doc.meta.analysis.feature_counts.functions) {
        const addr = formatAddress(f.address);
        const matches = matchesByFunction.get(addr);
        // Skip functions with no matches (unlikely)
        if (!matches || matches.size === 0) continue;

        // Add function to result with its address and capabilities (in rule insertion order)
        result.push({
            address: addr,
            capabilities: Array.from(matches.values())
        });
    }

    return result;
}
|
||||
|
||||
// Helper functions

/**
 * Parses a single `node` object (i.e. statement or feature) in each rule
 * @param {Object} node - The node to parse
 * @param {string} key - The key for this node
 * @param {Object} rules - The full rules object
 * @param {boolean} lib - Whether this is a library rule
 * @param {Object} layout - The rdoc layout (used to resolve process/call names for dynamic traces)
 * @returns {?Object} - Parsed node data, or null when the node should not be rendered
 */
function parseNode(node, key, rules, lib, layout) {
    if (!node) return null;

    // "not" statements are special-cased: the subtree's success flags are
    // inverted before deciding whether to render
    const isNotStatement = node.node.statement && node.node.statement.type === "not";
    const processedNode = isNotStatement ? invertNotStatementSuccess(node) : node;

    // unsuccessful (unmatched) nodes are pruned from the tree
    if (!processedNode.success) {
        return null;
    }

    const result = {
        key: key,
        data: {
            type: processedNode.node.type, // statement or feature
            typeValue: processedNode.node.statement?.type || processedNode.node.feature?.type, // e.g., number, regex, api, or, and, optional ... etc
            success: processedNode.success,
            name: getNodeName(processedNode),
            lib: lib,
            address: getNodeAddress(processedNode),
            description: getNodeDescription(processedNode)
        },
        children: []
    };
    // Recursively parse node children (i.e., nested statements or features)
    // NOTE(review): each child is created with the parent's `key` verbatim, so
    // sibling keys are not unique — confirm whether TreeTable depends on key
    // uniqueness before relying on these keys
    if (processedNode.children && Array.isArray(processedNode.children)) {
        result.children = processedNode.children
            .map((child) => {
                const childNode = parseNode(child, `${key}`, rules, lib, layout);
                return childNode;
            })
            .filter((child) => child !== null);
    }
    // If this is a match node, add the rule's source code to the result.data.source object
    // (children are dropped: the matched rule is rendered via its source instead)
    if (processedNode.node.feature && processedNode.node.feature.type === "match") {
        const ruleName = processedNode.node.feature.match;
        const rule = rules[ruleName];
        if (rule) {
            result.data.source = rule.source;
        }
        result.children = [];
    }
    // If this is an optional node, check if it has children. If not, return null (optional statement always evaluate to true)
    // we only render them, if they have at least one child node where node.success is true.
    if (processedNode.node.statement && processedNode.node.statement.type === "optional") {
        if (result.children.length === 0) return null;
    }

    // regex features have captures, which we need to process and add as children
    if (processedNode.node.feature && processedNode.node.feature.type === "regex") {
        result.children = processRegexCaptures(processedNode, key);
    }

    // Add call information for dynamic sandbox traces when the feature is `api`
    // (note: reads locations from the original `node`, not the inverted copy)
    if (processedNode.node.feature && processedNode.node.feature.type === "api") {
        const callInfo = getCallInfo(node, layout);
        if (callInfo) {
            result.children.push({
                key: key,
                data: {
                    type: "call-info",
                    name: callInfo
                },
                children: []
            });
        }
    }

    return result;
}
|
||||
|
||||
/**
 * Builds the formatted call information for an `api` feature node in a
 * dynamic sandbox trace: the owning process name plus a pretty-printed call
 * with one argument per line.
 * @param {Object} node - The api feature node; its first location must be a "call" address
 * @param {Object} layout - The rdoc layout used to resolve process and call names
 * @returns {Object|null} - `{ processName, callInfo }`, or null when the node has no call location
 */
function getCallInfo(node, layout) {
    if (!node.locations || node.locations.length === 0) return null;

    const location = node.locations[0];
    if (location.type !== "call") return null;

    const pname = getProcessName(layout, location);
    const cname = getCallName(layout, location);
    // split `name(arg1, arg2)...` into the name, the argument list, and the
    // trailing return-value text after the closing parenthesis
    const [fname, , restWithArgs] = partition(cname, "(");
    const [args, , returnValueWithParen] = rpartition(restWithArgs, ")");

    const s = [];
    s.push(`${fname}(`);
    for (const arg of args.split(", ")) {
        s.push(` ${arg},`);
    }
    s.push(`)${returnValueWithParen}`);

    return { processName: pname, callInfo: s.join("\n") };
}
|
||||
|
||||
/**
 * Splits a string into three parts around the first occurrence of a separator,
 * mirroring Python's str.partition().
 * @param {string} str - The string to split.
 * @param {string} separator - The separator to search for.
 * @returns {Array<string>} [before, separator, after]; when the separator is
 *     absent, [str, "", ""].
 * @example
 * partition("hello,world", ","); // ["hello", ",", "world"]
 * @example
 * partition("hello world", ":"); // ["hello world", "", ""]
 */
function partition(str, separator) {
    const at = str.indexOf(separator);
    if (at < 0) {
        return [str, "", ""];
    }
    const before = str.substring(0, at);
    const after = str.substring(at + separator.length);
    return [before, separator, after];
}
|
||||
|
||||
/**
 * Resolves the process name for a dynamic address via the rdoc layout.
 * @param {Object} layout - The layout object containing `processes`
 * @param {Object} address - Address whose `value` starts with [ppid, pid]
 * @returns {string} The process name, "Unnamed Process" for a matched but
 *     nameless process, or "Unknown Process" when no match is found
 */
function getProcessName(layout, address) {
    if (!layout || !Array.isArray(layout.processes)) {
        console.error("Invalid layout structure");
        return "Unknown Process";
    }

    const [ppid, pid] = address.value;

    // a process is identified by the (ppid, pid) pair
    const match = layout.processes.find(
        (process) =>
            process.address &&
            process.address.type === "process" &&
            process.address.value &&
            process.address.value[0] === ppid &&
            process.address.value[1] === pid
    );

    if (!match) return "Unknown Process";
    return match.name || "Unnamed Process";
}
|
||||
|
||||
/**
 * Splits a string into three parts around the last occurrence of a separator,
 * mirroring Python's str.rpartition().
 * @param {string} str - The string to split.
 * @param {string} separator - The separator to search for.
 * @returns {Array<string>} [before, separator, after] relative to the last
 *     occurrence; when the separator is absent, ["", "", str].
 * @example
 * rpartition("hello,world,", ","); // ["hello,world", ",", ""]
 * @example
 * rpartition("hello world", ":"); // ["", "", "hello world"]
 */
function rpartition(str, separator) {
    const at = str.lastIndexOf(separator);
    if (at < 0) {
        return ["", "", str];
    }
    const head = str.substring(0, at);
    const tail = str.substring(at + separator.length);
    return [head, separator, tail];
}
|
||||
|
||||
/**
 * Resolves the call name (function name plus rendered arguments) for a
 * dynamic call address via the rdoc layout.
 * @param {Object} layout - The layout object containing `processes`
 * @param {Object} address - Address whose `value` is [ppid, pid, tid, callId]
 * @returns {string} The call name, "Unnamed Call" for a matched but nameless
 *     call, or "Unknown Call" when no match is found
 */
function getCallName(layout, address) {
    if (!layout || !layout.processes || !Array.isArray(layout.processes)) {
        console.error("Invalid layout structure");
        return "Unknown Call";
    }

    const [ppid, pid, tid, callId] = address.value;

    for (const process of layout.processes) {
        if (
            !process.address ||
            process.address.type !== "process" ||
            !process.address.value ||
            process.address.value[0] !== ppid ||
            process.address.value[1] !== pid
        ) {
            continue;
        }
        // guard with ?? []: processes without matched threads (and threads
        // without matched calls) previously caused a TypeError here
        for (const thread of process.matched_threads ?? []) {
            if (
                !thread.address ||
                thread.address.type !== "thread" ||
                !thread.address.value ||
                thread.address.value[2] !== tid
            ) {
                continue;
            }
            for (const call of thread.matched_calls ?? []) {
                if (call.address && call.address.type === "call" && call.address.value && call.address.value[3] === callId) {
                    return call.name || "Unnamed Call";
                }
            }
        }
    }

    return "Unknown Call";
}
|
||||
|
||||
/**
 * Converts a regex node's capture groups into child rows for the tree table.
 * @param {Object} node - The regex feature node (may carry a `captures` map)
 * @param {string} key - The parent row's key, reused for each capture row
 * @returns {Array<Object>} One row per capture, or [] when there are none
 */
function processRegexCaptures(node, key) {
    if (!node.captures) return [];

    const rows = [];
    for (const [capture, locations] of Object.entries(node.captures)) {
        rows.push({
            key: key,
            data: {
                type: "regex-capture",
                name: `"${escape(capture)}"`,
                address: formatAddress(locations[0])
            }
        });
    }
    return rows;
}
|
||||
|
||||
/**
 * Formats an rdoc address object for display.
 * @param {Object} address - Address with `type` and `value` fields
 * @returns {string} Human-readable address ("" for "no address")
 * @throws {Error} When the address type is not recognized
 */
function formatAddress(address) {
    const { type, value } = address;
    switch (type) {
        case "absolute":
            return formatHex(value);
        case "relative":
            return `base address+${formatHex(value)}`;
        case "file":
            return `file+${formatHex(value)}`;
        case "dn token":
            return `token(${formatHex(value)})`;
        case "dn token offset": {
            const [token, offset] = value;
            return `token(${formatHex(token)})+${formatHex(offset)}`;
        }
        // all dynamic address flavors share one renderer
        case "process":
        case "thread":
        case "call":
            return formatDynamicAddress(value);
        case "no address":
            return "";
        default:
            throw new Error("Unexpected address type");
    }
}
|
||||
|
||||
/**
 * Escapes double quotes in a string with backslashes.
 * @param {string} str - The string to escape
 * @returns {string} The string with every `"` replaced by `\"`
 */
function escape(str) {
    return str.split('"').join('\\"');
}
|
||||
|
||||
/**
 * Inverts the success flags of all descendants of a 'not' statement node.
 * The node itself keeps its success value; every child (recursively) has its
 * success flag flipped. The input is not mutated.
 * @param {Object} node - The node whose subtree should be inverted
 * @returns {Object|null} A new node with inverted descendants, or null
 */
function invertNotStatementSuccess(node) {
    if (!node) return null;

    const flip = (child) => ({
        ...child,
        success: !child.success,
        children: child.children ? invertNotStatementSuccess(child).children : []
    });

    const children = node.children ? node.children.map(flip) : [];
    return { ...node, children };
}
|
||||
|
||||
/**
 * Gets the description of a node, from either its statement or its feature.
 * @param {Object} node - The node to get the description from
 * @returns {string|null} The description, or null when the node carries neither
 */
function getNodeDescription(node) {
    const { statement, feature } = node.node;
    if (statement) return statement.description;
    if (feature) return feature.description;
    return null;
}
|
||||
|
||||
/**
 * Gets the display name of a node by dispatching on whether it wraps a
 * statement or a feature.
 * @param {Object} node - The node to get the name from
 * @returns {string|null} The node's name, or null for an empty node
 */
function getNodeName(node) {
    const inner = node.node;
    if (inner.statement) return getStatementName(inner.statement);
    if (inner.feature) return getFeatureName(inner.feature);
    return null;
}
|
||||
|
||||
/**
 * Gets the display name for a statement node.
 * @param {Object} statement - The statement object
 * @returns {string} The statement's rendered name
 */
function getStatementName(statement) {
    const { type } = statement;

    if (type === "subscope") {
        // for example, "basic block:"
        return `${statement.scope}:`;
    }
    if (type === "range") {
        return getRangeName(statement);
    }
    if (type === "some") {
        return `${statement.count} or more`;
    }
    // plain statements, e.g. "and:", "or:", "optional:"
    return `${type}:`;
}
|
||||
|
||||
/**
 * Gets the display name for a feature node.
 * @param {Object} feature - The feature object
 * @returns {string} The feature's rendered value
 */
function getFeatureName(feature) {
    const { type } = feature;

    // numbers and offsets render as upper-case hex, e.g. "0x1234"
    if (type === "number" || type === "offset") {
        return `0x${feature[type].toString(16).toUpperCase()}`;
    }
    if (type === "bytes") {
        return formatBytes(feature.bytes);
    }
    if (type === "operand offset") {
        return `operand[${feature.index}].offset: 0x${feature.operand_offset.toString(16).toUpperCase()}`;
    }
    // all other features render their value directly, e.g. an api name
    return `${feature[type]}`;
}
|
||||
|
||||
/**
 * Formats the display name for a range statement,
 * e.g. "count(mnemonic(xor)): 2 or more ".
 * @param {Object} statement - The range statement with `child`, `min`, `max`
 * @returns {string} The formatted range name (note the trailing space)
 */
function getRangeName(statement) {
    const { child, min, max } = statement;
    const childType = child.type;
    const childValue = child[childType];

    // include the child's value when present (0 counts as present)
    const hasValue = childValue || childValue === 0;
    const counted = hasValue ? `count(${childType}(${childValue}))` : `count(${childType})`;

    let amount;
    if (min === max) {
        amount = `${min}`;
    } else if (max >= Number.MAX_SAFE_INTEGER) {
        // an unbounded upper limit renders as "N or more"
        amount = `${min} or more`;
    } else {
        amount = `between ${min} and ${max}`;
    }

    return `${counted}: ${amount} `;
}
|
||||
|
||||
/**
 * Gets the formatted address of a node's first location.
 * Regex features return null: their addresses are rendered on the capture
 * children instead.
 * @param {Object} node - The node to get the address from
 * @returns {string|null} The formatted address, or null if not applicable
 */
function getNodeAddress(node) {
    if (node.node.feature?.type === "regex") return null;
    if (!node.locations || node.locations.length === 0) return null;
    return formatAddress(node.locations[0]);
}
|
||||
|
||||
/**
 * Formats a hex byte string for display: upper-cased, with a space between
 * each byte pair, e.g. "8d4c24" -> "8D 4C 24".
 * @param {string} byteString - The raw hex byte string
 * @returns {string} The formatted bytes string
 */
function formatBytes(byteString) {
    // insert a space after every two characters, then trim the trailing one
    const spaced = byteString.replace(/(.{2})/g, "$1 ");
    return spaced.trim().toUpperCase();
}
|
||||
|
||||
/**
 * Formats a dynamic-flavor address value for display. Components are labeled
 * ppid/pid/tid/id and rendered most-specific first,
 * e.g. [1, 2, 3, 4] -> "id:4,tid:3,pid:2,ppid:1".
 * @param {Array} value - The address value array ([ppid, pid, ...])
 * @returns {string} - Formatted address string
 */
function formatDynamicAddress(value) {
    const labels = ["ppid", "pid", "tid", "id"];
    const parts = value.map((component, i) => `${labels[i]}:${component}`);
    parts.reverse();
    return parts.join(",");
}
|
||||
|
||||
/**
 * Formats a numeric address as upper-case hex with a "0x" prefix.
 * @param {number} address - The address to format
 * @returns {string} e.g. 255 -> "0xFF"
 */
function formatHex(address) {
    const hex = address.toString(16).toUpperCase();
    return `0x${hex}`;
}
|
||||
79
web/explorer/src/utils/urlHelpers.js
Normal file
79
web/explorer/src/utils/urlHelpers.js
Normal file
@@ -0,0 +1,79 @@
|
||||
/**
 * Creates an MBC (Malware Behavior Catalog) markdown URL from an MBC object.
 * Behavior IDs (B/T/E/F prefix) link into the repository root; Micro-Behavior
 * IDs (C prefix) link under micro-behaviors/.
 *
 * @param {Object} mbc - The MBC object to format.
 * @param {string} mbc.id - The ID of the MBC entry.
 * @param {string} mbc.objective - The objective of the malware behavior.
 * @param {string} mbc.behavior - The specific behavior of the malware.
 * @returns {string|null} The MBC URL, or null if the ID prefix is unknown.
 */
export function createMBCHref(mbc) {
    const slug = (text) => text.toLowerCase().replace(/\s+/g, "-");

    let baseUrl;
    if (["B", "T", "E", "F"].includes(mbc.id[0])) {
        // Behavior
        baseUrl = "https://github.com/MBCProject/mbc-markdown/blob/main";
    } else if (mbc.id.startsWith("C")) {
        // Micro-Behavior
        baseUrl = "https://github.com/MBCProject/mbc-markdown/blob/main/micro-behaviors";
    } else {
        // unknown prefix
        return null;
    }

    return `${baseUrl}/${slug(mbc.objective)}/${slug(mbc.behavior)}.md`;
}
|
||||
|
||||
/**
 * Creates a MITRE ATT&CK URL for a technique or sub-technique.
 * "T1055" maps to .../techniques/T1055, "T1055.012" to .../techniques/T1055/012.
 *
 * @param {Object} attack - The ATT&CK object containing information about the technique.
 * @param {string} attack.id - The ID of the ATT&CK technique or sub-technique.
 * @returns {string|null} The MITRE ATT&CK URL, or null for a malformed ID.
 */
export function createATTACKHref(attack) {
    const baseUrl = "https://attack.mitre.org/techniques/";
    const [technique, subTechnique, ...rest] = attack.id.split(".");

    // more than one dot is not a valid technique ID
    if (rest.length > 0) return null;
    if (subTechnique === undefined) {
        return `${baseUrl}${technique}`;
    }
    return `${baseUrl}${technique}/${subTechnique}`;
}
|
||||
|
||||
/**
 * Creates a capa-rules GitHub URL for a rule node at a specific release tag.
 *
 * @param {Object} node - The node object containing data about the rule.
 * @param {string} node.data.namespace - The namespace of the rule (defaults to "lib").
 * @param {string} node.data.name - The name of the rule.
 * @param {string} tag - The capa release tag (without the leading "v").
 * @returns {string|null} The capa-rules URL, or null on missing input.
 */
export function createCapaRulesUrl(node, tag) {
    if (!node?.data || !tag) return null;

    const namespace = node.data.namespace || "lib";
    const fileName = node.data.name.toLowerCase().replace(/\s+/g, "-");

    return `https://github.com/mandiant/capa-rules/blob/v${tag}/${namespace}/${fileName}.yml`;
}
|
||||
|
||||
/**
 * Creates a VirusTotal search deep-link for a behavior signature.
 *
 * @param {string} behaviorName - The name of the behavior signature.
 * @returns {string} The VirusTotal file-search URL.
 */
export function createVirusTotalUrl(behaviorName) {
    const query = `behaviour_signature:"${behaviorName}"`;
    const encodedQuery = encodeURIComponent(query);
    return `https://www.virustotal.com/gui/search/${encodedQuery}/files`;
}
|
||||
76
web/explorer/src/views/AnalysisView.vue
Normal file
76
web/explorer/src/views/AnalysisView.vue
Normal file
@@ -0,0 +1,76 @@
|
||||
<!--
    AnalysisView: results page shown once an rdoc has been loaded.
    Renders the metadata panel, the display-settings panel, and exactly one of:
    the rule-matches table, the per-function (static) or per-process (dynamic)
    capability views, or the namespace sunburst chart.
-->
<template>
    <MetadataPanel :data="doc" />
    <SettingsPanel
        :flavor="doc.meta.flavor"
        :library-rule-matches-count="libraryRuleMatchesCount"
        @update:show-capabilities-by-function-or-process="updateShowCapabilitiesByFunctionOrProcess"
        @update:show-library-rules="updateShowLibraryRules"
        @update:show-namespace-chart="updateShowNamespaceChart"
        @update:show-column-filters="updateShowColumnFilters"
    />
    <RuleMatchesTable
        v-if="!showCapabilitiesByFunctionOrProcess && !showNamespaceChart"
        :data="doc"
        :show-library-rules="showLibraryRules"
        :show-column-filters="showColumnFilters"
    />
    <!-- per-function view applies to static analysis results only -->
    <FunctionCapabilities
        v-if="doc.meta.flavor === 'static' && showCapabilitiesByFunctionOrProcess && !showNamespaceChart"
        :data="doc"
        :show-library-rules="showLibraryRules"
        :show-column-filters="showColumnFilters"
    />
    <!-- per-process view applies to dynamic analysis results only -->
    <ProcessCapabilities
        v-else-if="doc.meta.flavor === 'dynamic' && showCapabilitiesByFunctionOrProcess && !showNamespaceChart"
        :data="doc"
        :show-capabilities-by-process="showCapabilitiesByFunctionOrProcess"
        :show-library-rules="showLibraryRules"
        :show-column-filters="showColumnFilters"
    />
    <NamespaceChart v-else-if="showNamespaceChart" :data="doc" />
</template>

<script setup>
import { ref, computed } from "vue";

// Components
import MetadataPanel from "@/components/MetadataPanel.vue";
import SettingsPanel from "@/components/SettingsPanel.vue";
import RuleMatchesTable from "@/components/RuleMatchesTable.vue";
import FunctionCapabilities from "@/components/FunctionCapabilities.vue";
import ProcessCapabilities from "@/components/ProcessCapabilities.vue";
import NamespaceChart from "@/components/NamespaceChart.vue";

// Import loaded rdoc
import { rdocStore } from "@/store/rdocStore";
const doc = rdocStore.data.value;

// Viewing options (toggled via SettingsPanel events below)
const showCapabilitiesByFunctionOrProcess = ref(false);
const showLibraryRules = ref(false);
const showNamespaceChart = ref(false);
const showColumnFilters = ref(false);

// Count library rules (rules whose meta.lib flag is set)
// NOTE(review): the guard checks `doc` but the body reads
// rdocStore.data.value.rules — presumably the same object; confirm before refactoring.
const libraryRuleMatchesCount = computed(() => {
    if (!doc || !doc.rules) return 0;
    return Object.values(rdocStore.data.value.rules).filter((rule) => rule.meta.lib).length;
});

// Event handlers to update variables
const updateShowCapabilitiesByFunctionOrProcess = (value) => {
    showCapabilitiesByFunctionOrProcess.value = value;
};

const updateShowLibraryRules = (value) => {
    showLibraryRules.value = value;
};

const updateShowNamespaceChart = (value) => {
    showNamespaceChart.value = value;
};

const updateShowColumnFilters = (value) => {
    showColumnFilters.value = value;
};
</script>
|
||||
78
web/explorer/src/views/ImportView.vue
Normal file
78
web/explorer/src/views/ImportView.vue
Normal file
@@ -0,0 +1,78 @@
|
||||
<!--
    ImportView: landing page offering the ways to load a capa result document
    (rdoc): local file upload, URL, or one of the bundled demo documents.
    On success the rdoc is placed in the shared store and the router navigates
    to the analysis view.
-->
<template>
    <DescriptionPanel />
    <UploadOptions
        @load-from-local="loadFromLocal"
        @load-from-url="loadFromURL"
        @load-demo-static="loadDemoDataStatic"
        @load-demo-dynamic="loadDemoDataDynamic"
    />
</template>

<script setup>
import { watch } from "vue";

// components
import DescriptionPanel from "@/components/DescriptionPanel.vue";
import UploadOptions from "@/components/UploadOptions.vue";

// import demo data (bundled test fixtures: one static, one dynamic rdoc)
import demoRdocStatic from "@testfiles/rd/al-khaser_x64.exe_.json";
import demoRdocDynamic from "@testfiles/rd/0000a65749f5902c4d82ffa701198038f0b4870b00a27cfca109f8f933476d82.json";

// import router utils
import { useRouter, useRoute } from "vue-router";
const router = useRouter();
const route = useRoute();

// import rdoc loader function
import { useRdocLoader } from "@/composables/useRdocLoader";
const { loadRdoc } = useRdocLoader();

// import rdoc store
import { rdocStore } from "@/store/rdocStore";

// handler for a local file selected in the upload widget
const loadFromLocal = async (event) => {
    const result = await loadRdoc(event.files[0]);
    if (result) {
        rdocStore.setData(result);
        router.push("/analysis");
    }
};

// handler for a user-supplied URL; the URL is kept in the query string so the
// analysis view can be shared/bookmarked
const loadFromURL = async (url) => {
    const result = await loadRdoc(url);
    if (result) {
        rdocStore.setData(result);
        router.push({ name: "analysis", query: { rdoc: url } });
    }
};

// NOTE(review): the demo loaders store the raw imported object rather than
// `result` — presumably loadRdoc returns the same data; confirm this is intentional.
const loadDemoDataStatic = async () => {
    const result = await loadRdoc(demoRdocStatic);
    if (result) {
        rdocStore.setData(demoRdocStatic);
        router.push("/analysis");
    }
};

const loadDemoDataDynamic = async () => {
    const result = await loadRdoc(demoRdocDynamic);
    if (result) {
        rdocStore.setData(demoRdocDynamic);
        router.push("/analysis");
    }
};

// Watch for changes in the rdoc query parameter (deep links like /?rdoc=<url>)
watch(
    () => route.query.rdoc,
    (rdocURL) => {
        if (rdocURL) {
            // Clear the query parameter
            router.replace({ query: {} });
            loadFromURL(decodeURIComponent(rdocURL));
        }
    },
    { immediate: true }
);
</script>
|
||||
19
web/explorer/src/views/NotFoundView.vue
Normal file
19
web/explorer/src/views/NotFoundView.vue
Normal file
@@ -0,0 +1,19 @@
|
||||
<!-- NotFoundView: simple 404 page for unknown routes, with a button back home. -->
<template>
    <div class="flex flex-column align-items-center justify-content-center min-h-screen bg-blue-50">
        <h1 class="text-900 font-bold text-8xl mb-4">404</h1>
        <p class="text-600 text-3xl mb-5">Oops! The page you're looking for doesn't exist.</p>

        <Button label="Go Home" icon="pi pi-home" @click="goHome" />
    </div>
</template>

<script setup>
import { useRouter } from "vue-router";
import Button from "primevue/button";

const router = useRouter();

// navigate back to the import (landing) view
const goHome = () => {
    router.push("/");
};
</script>
|
||||
19
web/explorer/vite.config.js
Normal file
19
web/explorer/vite.config.js
Normal file
@@ -0,0 +1,19 @@
|
||||
// Vite build configuration for the capa Explorer Web app.
import { defineConfig } from "vite";
import vue from "@vitejs/plugin-vue";
import { viteSingleFile } from "vite-plugin-singlefile";
import { fileURLToPath, URL } from "node:url";

export default defineConfig(({ mode }) => {
    // "bundle" mode inlines the whole app into a single self-contained HTML file
    const isBundle = mode === "bundle";

    return {
        // relative asset paths so the app can be served from any sub-directory
        base: "./",
        plugins: isBundle ? [vue(), viteSingleFile()] : [vue()],
        resolve: {
            alias: {
                "@": fileURLToPath(new URL("src", import.meta.url)),
                // rdoc JSON fixtures shared with the repository's test data
                "@testfiles": fileURLToPath(new URL("../../tests/data", import.meta.url))
            }
        }
    };
});
|
||||
12
web/explorer/vitest.config.js
Normal file
12
web/explorer/vitest.config.js
Normal file
@@ -0,0 +1,12 @@
|
||||
// Vitest configuration for the capa Explorer Web unit tests.
import { defineConfig } from "vitest/config";
import vue from "@vitejs/plugin-vue";

export default defineConfig({
    plugins: [vue()],
    test: {
        // expose describe/it/expect as globals, no per-file imports needed
        globals: true,
        // DOM emulation so Vue components can be mounted in tests
        environment: "jsdom",
        exclude: ["node_modules", "dist", ".idea", ".git", ".cache"],
        include: ["src/**/*.{test,spec}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}"]
    }
});
|
||||
BIN
web/public/img/icon.ico
Normal file
BIN
web/public/img/icon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 15 KiB |
BIN
web/public/img/icon.png
Normal file
BIN
web/public/img/icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 6.4 KiB |
49
web/public/index.html
Normal file
49
web/public/index.html
Normal file
@@ -0,0 +1,49 @@
|
||||
<!DOCTYPE html>
<!-- Landing page linking to the capa Explorer Web app. -->
<html lang="en">
    <head>
        <meta charset="UTF-8" />
        <link rel="icon" href="img/icon.ico" />
        <meta name="viewport" content="width=device-width, initial-scale=1.0" />
        <title>capa</title>
        <style>
            /*
              Josh's Custom CSS Reset
              https://www.joshwcomeau.com/css/custom-css-reset/
            */
            *, *::before, *::after {
                box-sizing: border-box;
            }
            * {
                margin: 0;
            }
            body {
                line-height: 1.5;
                -webkit-font-smoothing: antialiased;
            }
            img, picture, video, canvas, svg {
                display: block;
                max-width: 100%;
            }
            input, button, textarea, select {
                font: inherit;
            }
            p, h1, h2, h3, h4, h5, h6 {
                overflow-wrap: break-word;
            }
            #root, #__next {
                isolation: isolate;
            }
        </style>
    </head>
    <body>
        <div style="height: 100%; display: flex; align-items: center; justify-content: center;">
            <div>
                <!-- this is centered -->
                <img src="./img/icon.png" alt="capa logo" />
                <br />
                <!-- fix: the anchor was never closed (`<a>` instead of `</a>`) -->
                <a href="./explorer/">capa Explorer Web</a>
            </div>
        </div>
    </body>
</html>
|
||||
Reference in New Issue
Block a user