mirror of
https://github.com/SpacehuhnTech/esp8266_deauther.git
synced 2025-12-12 15:50:47 -08:00
Version 2.0
This commit is contained in:
@@ -1,81 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
"""This script downloads the latest OUI manufacturer file from the Wireshark
|
||||
project and converts it to esp8266_deauther format"""
|
||||
|
||||
import argparse
|
||||
from urllib.request import urlopen
|
||||
|
||||
WS_MANUF_FILE_URL = "https://code.wireshark.org/review/gitweb?p=wireshark.git;a=blob_plain;f=manuf"
|
||||
|
||||
def parse_options():
    """Parses command line options"""
    parser = argparse.ArgumentParser()
    for flags, help_text in (
            (("-o", "--output"), "Output file name (e.g oui.h)"),
            (("-u", "--url"), "Wireshark oui/manuf file url")):
        parser.add_argument(*flags, help=help_text)
    return parser.parse_args()
|
||||
|
||||
def generate_oui_h(url, filename):
    """Generates the vendors/oui file.

    Downloads the Wireshark manuf database from `url` (falling back to
    WS_MANUF_FILE_URL) and converts every 3-octet OUI prefix entry into a
    C byte-array line.  The result is written to `filename` when given,
    otherwise printed to stdout.
    """

    # Fetch the manuf database (caller-supplied mirror or the default URL).
    if url:
        data = urlopen(url)
    else:
        data = urlopen(WS_MANUF_FILE_URL)

    # C header preamble; one array line per OUI entry is appended below.
    out = """#ifndef oui_h
#define oui_h
/*
Based on Wireshark manufacturer database
source: https://www.wireshark.org/tools/oui-lookup.html
Wireshark is released under the GNU General Public License version 2
*/

const static uint8_t data_vendors[] PROGMEM = {///*
"""

    for line in data:
        line = line.decode()

        # Skipping empty lines and comments
        if line.startswith('#') or line.startswith('\n'):
            continue

        # manuf format: MAC<TAB>short name[<TAB>long name...]
        mac, short_desc, *rest = line.strip().split('\t')

        # Limiting short_desc to 8 chars
        short_desc = short_desc[0:8]

        # Convert to ascii
        short_desc = short_desc.encode("ascii", "ignore").decode()

        mac_octects = len(mac.split(':'))
        # Full 6-octet entries are individual devices, not OUI prefixes; skip.
        if mac_octects == 6:
            continue
        else:
            # Convert to esp8266_deauther format: 8 NUL-padded name bytes.
            short_desc = short_desc.ljust(8, '\0')
            hex_sdesc = ", 0x".join("{:02x}".format(ord(c)) for c in short_desc)

            (oc1, oc2, oc3) = mac.split(':')

            # 3 OUI octets followed by the 8 name bytes.
            out = out + ("    0x{}, 0x{}, 0x{}, 0x{},\n".format(oc1.upper(), oc2.upper(), oc3.upper(),
                                                                hex_sdesc.upper().replace('X', 'x')))

    out = out[:-2]  # Removing last comma
    out = out + "\n};\n#endif"

    # Saving to file
    if filename:
        with open(filename, 'w') as out_file:
            out_file.write("%s" % out)
    else:
        print(out)
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: parse options, then download and convert the list.
    options = parse_options()
    generate_oui_h(options.url, options.output)
|
||||
|
||||
5
utils/vendor_list_updater/README.md
Normal file
5
utils/vendor_list_updater/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
`python3 update_manuf.py -o oui.h -s`
|
||||
|
||||
This Python script updates the manufacturer list oui.h in deauther2.0/esp8266_deauther.
|
||||
|
||||
The -s option is for creating a limited list of the top 1000 vendors. That is enough for most devices and it makes the list fit in 512kb.
|
||||
144
utils/vendor_list_updater/update_manuf.py
Normal file
144
utils/vendor_list_updater/update_manuf.py
Normal file
@@ -0,0 +1,144 @@
|
||||
#!/usr/bin/env python3
|
||||
# This script downloads the latest OUI manufacturer file from the Wireshark
|
||||
# project and converts it to esp8266_deauther format
|
||||
#
|
||||
# Copyright (c) 2018 xdavidhu
|
||||
# https://github.com/xdavidhu/
|
||||
#
|
||||
|
||||
import argparse
|
||||
from urllib.request import urlopen
|
||||
|
||||
# Default source for the Wireshark manufacturer (OUI) database.
WS_MANUF_FILE_URL = "https://code.wireshark.org/review/gitweb?p=wireshark.git;a=blob_plain;f=manuf"
# [mac-prefix, vendor-index] pairs collected by generate_lists().
macs = []
# Final ordered vendor names; list position is the index stored in `macs`.
vendors = []
# [vendor-name, occurrence-count] pairs used to rank vendors by frequency.
tempVendors = []
|
||||
|
||||
def padhex(s):
    """Zero-pad a '0x…' hex literal to two hex digits ('0x5' -> '0x05')."""
    digits = s[2:]
    return '0x' + digits.rjust(2, '0')
|
||||
|
||||
def parse_options():
    """Parse the command-line flags of the manuf updater."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output", required=True,
                        help="Output file name for macs list")
    parser.add_argument("-s", "--small", action='store_true',
                        help="Generate small file only with most used 10 000 macs")
    parser.add_argument("-u", "--url",
                        help="Wireshark oui/manuf file url")
    return parser.parse_args()
|
||||
|
||||
def generate_lists(url, output, small):
    """Download the Wireshark manuf file and build the vendor/MAC tables.

    Fills the module-level lists `tempVendors` ([name, count] pairs),
    `vendors` (ordered vendor names; the position is the id written to the
    header) and `macs` ([mac-prefix, vendor-index] pairs), then hands off
    to generate_files(output).
    """

    global tempVendors
    global vendors
    global macs

    # Fetch the manuf database (caller-supplied mirror or the default URL).
    if url:
        data = urlopen(url)
    else:
        data = urlopen(WS_MANUF_FILE_URL)

    lines = data.readlines()

    # First pass: count how often each (8-char, ASCII-only) vendor name occurs.
    for line in lines:
        line = line.decode()
        # Skip comments and blank lines.
        if line.startswith('#') or line.startswith('\n'):
            continue
        mac, short_desc, *rest = line.strip().split('\t')
        # Vendor names are truncated to 8 chars and stripped to plain ASCII.
        short_desc = short_desc[0:8]
        short_desc = short_desc.encode("ascii", "ignore").decode()
        mac_octects = len(mac.split(':'))
        # Full 6-octet entries are single devices, not OUI prefixes; skip them.
        if mac_octects == 6:
            continue
        else:
            inList = False
            for vendor in tempVendors:
                if vendor[0] == short_desc:
                    inList = True
                    vendor[1] += 1
                    break
            if not inList:
                tempVendors.append([short_desc, 1])

    if small:
        # Most-frequent vendors first.
        # NOTE(review): the truncation below is commented out, so -s currently
        # only changes the ordering, not the list size — confirm intent.
        tempVendors.sort(key=lambda x: x[1])
        tempVendors.reverse()
        #tempVendors = tempVendors[:1000]

    for vendor in tempVendors:
        vendors.append(vendor[0])

    # Second pass: map every OUI prefix to its vendor's index.
    for line in lines:
        line = line.decode()
        if line.startswith('#') or line.startswith('\n'):
            continue
        mac, short_desc, *rest = line.strip().split('\t')
        short_desc = short_desc[0:8]
        short_desc = short_desc.encode("ascii", "ignore").decode()
        mac_octects = len(mac.split(':'))
        if mac_octects == 6:
            continue
        else:
            for vendor in vendors:
                if vendor == short_desc:
                    index = vendors.index(vendor)
                    macs.append([mac, index])

    generate_files(output)
|
||||
|
||||
|
||||
def generate_files(output):
    """Render the collected tables as a C header (oui.h) and save it.

    Reads the module-level `vendors` and `macs` lists filled by
    generate_lists() and writes two PROGMEM byte arrays to `output`:
    data_vendors (8 NUL-padded ASCII bytes per vendor name) and data_macs
    (3 OUI octets plus 2 index bytes, low byte first, per entry).

    Fix: the explicit out_file.close() inside the `with` block was removed —
    the context manager already closes the file on exit, so the call was
    redundant.
    """
    global tempVendors
    global vendors
    global macs

    # 'vendors' list: every name becomes 8 comma-separated hex bytes.
    vendorsTxt = ""
    for vendor in vendors:
        vendor = vendor.ljust(8, '\0')
        hex_vendor = ", 0x".join("{:02x}".format(ord(c)) for c in vendor)
        line = "0x" + hex_vendor
        vendorsTxt += line + ",\n"
    vendorsTxt = vendorsTxt[:-2] + "\n"  # drop the trailing ",\n"

    # 'macs' list: 3 OUI octets followed by the vendor index bytes.
    macsTxt = ""
    for mac in macs:
        macaddr = mac[0]
        vendorindex = mac[1]
        (oc1, oc2, oc3) = macaddr.split(':')
        if vendorindex > 255:
            # Split a multi-byte index into bytes, least significant first.
            num = vendorindex
            index_bytes = []
            while num > 0:
                byte = num % 0x100
                index_bytes.append(byte)
                num //= 0x100
            hex_index = ""
            for byte in index_bytes:
                hex_index += padhex(hex(byte)) + ", "
            hex_index = hex_index[:-2]
        else:
            # Single-byte index, padded with a 0x00 high byte.
            hex_index = padhex(hex(vendorindex)) + ", 0x00"
        line = "0x" + oc1.upper() + ", " + "0x" + oc2.upper() + ", " + "0x" + oc3.upper() + ", " + hex_index
        macsTxt += line + ",\n"
    macsTxt = macsTxt[:-2] + "\n"  # drop the trailing ",\n"

    # Saving to file
    if output:
        with open(output, 'w') as out_file:
            out_file.write("#ifndef oui_h\n#define oui_h\n/*\n Based on Wireshark manufacturer database\n source: https://www.wireshark.org/tools/oui-lookup.html\n Wireshark is released under the GNU General Public License version 2\n*/\n\n#define ENABLE_MAC_LIST // comment out if you want to save memory\n\n")
            out_file.write("const static uint8_t data_vendors[] PROGMEM = {\n#ifdef ENABLE_MAC_LIST\n")
            out_file.write(vendorsTxt)
            out_file.write("#endif\n};\n")
            out_file.write("const static uint8_t data_macs[] PROGMEM = {\n#ifdef ENABLE_MAC_LIST\n")
            out_file.write(macsTxt)
            out_file.write("#endif\n};\n#endif")

    print("Done.")
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: parse flags, then download/convert the manuf list.
    options = parse_options()
    generate_lists(options.url, options.output, options.small)
|
||||
57
utils/web_converter/convert_all.sh
Normal file
57
utils/web_converter/convert_all.sh
Normal file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# This script walks through the html folder and minify all JS, HTML and CSS files. It also generates
|
||||
# the corresponding constants that is added to the data.h file on esp8266_deauther folder.
|
||||
#
|
||||
# @Author Erick B. Tedeschi < erickbt86 [at] gmail [dot] com >
|
||||
#
|
||||
|
||||
# Generated constants are appended to this file; start from a clean slate.
outputfile="$(pwd)/data_h_temp"

# -f: do not fail or complain when the file does not exist yet
# (plain `rm` printed an error on the first run).
rm -f "$outputfile"
||||
|
||||
# Minify an HTML/CSS file via the html-minifier.com web service;
# the minified output is written to /tmp/converter.temp.
function minify_html_css {
    local file="$1"
    curl -X POST -s --data-urlencode "input@$file" http://html-minifier.com/raw > /tmp/converter.temp
}
|
||||
|
||||
# Minify a JavaScript file via the javascript-minifier.com web service;
# the minified output is written to /tmp/converter.temp.
function minify_js {
    local file="$1"
    curl -X POST -s --data-urlencode "input@$file" https://javascript-minifier.com/raw > /tmp/converter.temp
}
|
||||
|
||||
# Emit one C constant line for the file currently in /tmp/converter.temp,
# e.g.:  const char data_indexHTML[] PROGMEM = {0x3c,0x21,...};
# $1 is the original file path (used only to derive the identifier).
function ascii2hexCstyle {
    file_name=$(constFileName "$1")
    # Read the temp file directly instead of the useless `cat | hexdump` pipe.
    result=$(hexdump -ve '1/1 "0x%.2x,"' < /tmp/converter.temp)
    # Strip the trailing comma so the array literal stays valid.
    result=$(echo "$result" | sed 's/,$//')
    echo "const char data_${file_name}[] PROGMEM = {$result};"
}
|
||||
|
||||
# Derive a C identifier from a relative path: drop the extension and the
# leading "./", map '/' and '.' to '_', then append the upper-cased
# extension (CSS/JS/HTML), e.g. "./js/attack.js" -> "js_attackJS".
function constFileName {
    local extension base
    extension=$(echo "$1" | egrep -io "(css|js|html)$" | tr "[:lower:]" "[:upper:]")
    base=$(echo "$1" | sed 's/\.css//' | sed 's/\.html//' | sed 's/\.js//' | sed 's/\.\///' | tr '/' '_' | tr '.' '_')
    echo "$base$extension"
}
|
||||
|
||||
|
||||
# Walk the html/ tree and append one generated constant per file to
# $outputfile.  Abort if html/ is missing instead of scanning the wrong
# directory (the original unguarded `cd` continued on failure).
cd html || exit 1
file_list=$(find . -type f)

for file in $file_list; do
    echo "Processing: $file"
    if [[ "$file" == *.js ]]; then
        echo "-> JS minifier"
        minify_js "$file"
        ascii2hexCstyle "$file" >> "$outputfile"
    elif [[ "$file" == *.html ]] || [[ "$file" == *.css ]]; then
        echo "-> HTML and CSS minifier"
        minify_html_css "$file"
        ascii2hexCstyle "$file" >> "$outputfile"
    else
        echo "-> without minifier"
        cat "$file" > /tmp/converter.temp
        ascii2hexCstyle "$file" >> "$outputfile"
    fi
    # Be polite to the public minifier services.
    sleep 1
done
|
||||
71
utils/web_converter/converter.html
Normal file
71
utils/web_converter/converter.html
Normal file
@@ -0,0 +1,71 @@
|
||||
<!Doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Byte Converter</title>
|
||||
<meta name="description" content="Convert Text into Hex-Bytes">
|
||||
<meta name="author" content="Spacehuhn - Stefan Kremser">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<link rel="stylesheet" href="style.css">
|
||||
<script src="jquery-3.2.1.min.js"></script>
|
||||
<style>
|
||||
textarea{
|
||||
width: 96%;
|
||||
height: 350px;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<nav>
|
||||
<a href="index.html">Converter</a>
|
||||
<a href="https://github.com/spacehuhn" class="right">GitHub</a>
|
||||
</nav>
|
||||
|
||||
<div class="container">
|
||||
|
||||
<div class="row">
|
||||
<div class="col-12">
|
||||
<h1 class="header">Text to Byte Array Converter</h1>
|
||||
<p>
|
||||
Please use <a href="https://htmlcompressor.com/compressor/" target="_blank">HTMLCompressor</a> (or something similar) first to get your HTML, CSS and JS minified.<br />
|
||||
Every saved byte can improve the stability of the ESP8266's webserver!
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-6">
|
||||
<textarea id="input"></textarea>
|
||||
</div>
|
||||
<div class="col-6">
|
||||
<textarea id="output" onclick="this.focus();this.select()" readonly="readonly"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-12">
|
||||
<button onclick="convert()" class="fullWidth button-primary">convert</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-12">
|
||||
<p>Length: <span id="info_len">0</span> Bytes</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<script>
|
||||
/* Convert every character of the string to its two-digit hex code,
   joined by the given delimiter (empty string when omitted). */
String.prototype.convertToHex = function (delim) {
    var sep = delim || "";
    var parts = [];
    for (var i = 0; i < this.length; i++) {
        parts.push(("0" + this.charCodeAt(i).toString(16)).slice(-2));
    }
    return parts.join(sep);
};

/* Read the input textarea, render it as a C-style 0x.. byte list in the
   output textarea and show the resulting byte count. */
function convert(){
    var hex = $('#input').val().convertToHex(",0x");
    $('#output').val("0x" + hex);
    $('#info_len').html((hex.match(new RegExp(",", "g")) || []).length + 1);
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
4
utils/web_converter/jquery-3.2.1.min.js
vendored
Normal file
4
utils/web_converter/jquery-3.2.1.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
40
utils/web_converter/readme.md
Normal file
40
utils/web_converter/readme.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# How to update files inside html folder?
|
||||
The files related to the Frontend of ESP8266_Deauther are inside html folder.
|
||||
To be reflected in the firmware, each file needs to be minified, converted to hex, and updated in data.h in the esp8266_deauther folder at the root of this project.
|
||||
|
||||
The following process can be used:
|
||||
## Script Mode (Linux/Mac)
|
||||
|
||||
**1** Update the desired files on ./html folder
|
||||
**2** at the command line run the shell script: ./convert_all.sh
|
||||
**3** open the generated file "data_h_temp" and copy the content (CTRL+C)
|
||||
**4** Go to data.h and replace the content between the comments like below:
|
||||
```c
|
||||
/* constants generated by convert_all.sh - start */
|
||||
const char data_apscanHTML[] PROGMEM = {0x3c,0x21,0x44,0x4f,0x43...
|
||||
const char data_attackHTML[] PROGMEM = {0x3c,0x21,0x44,0x4f,0x43...
|
||||
const char data_errorHTML[] PROGMEM = {0x3c,0x21,0x44,0x4f,0x43,...
|
||||
const char data_indexHTML[] PROGMEM = {0x3c,0x21,0x44,0x4f,0x43,...
|
||||
const char data_infoHTML[] PROGMEM = {0x3c,0x21,0x44,0x4f,0x43,0...
|
||||
const char data_js_apscanJS[] PROGMEM = {0x66,0x75,0x6e,0x63,0x7...
|
||||
const char data_js_attackJS[] PROGMEM = {0x66,0x75,0x6e,0x63,0x7...
|
||||
const char data_js_functionsJS[] PROGMEM = {0x66,0x75,0x6e,0x63,...
|
||||
const char data_js_settingsJS[] PROGMEM = {0x66,0x75,0x6e,0x63,0...
|
||||
const char data_js_stationsJS[] PROGMEM = {0x66,0x75,0x6e,0x63,0...
|
||||
const char data_license[] PROGMEM = {0x43,0x6f,0x70,0x79,0x72,0x...
|
||||
const char data_settingsHTML[] PROGMEM = {0x3c,0x21,0x44,0x4f,0x...
|
||||
const char data_stationsHTML[] PROGMEM = {0x3c,0x21,0x44,0x4f,0x...
|
||||
const char data_styleCSS[] PROGMEM = {0x2f,0x2a,0x20,0x47,0x6c,0...
|
||||
/* constants generated by convert_all.sh - end */
|
||||
```
|
||||
|
||||
## Manual mode
|
||||
|
||||
**1** Use a minifier (e.g. htmlcompressor.com) to get your files as small as possible
|
||||
**2** Open converter.html
|
||||
**3** Paste the code in the left textfield
|
||||
**4** Press Convert
|
||||
**5** Copy the results from the right textfield
|
||||
**6** Go to data.h and replace the array of the changed file with the copied bytes
|
||||
|
||||
**Now compile and upload your new sketch :)**
|
||||
405
utils/web_converter/style.css
Normal file
405
utils/web_converter/style.css
Normal file
@@ -0,0 +1,405 @@
|
||||
/* Global */
|
||||
body {
|
||||
background: #36393e;
|
||||
color: #bfbfbf;
|
||||
font-family: sans-serif;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 1.7rem;
|
||||
margin-top: 1rem;
|
||||
background: #2f3136;
|
||||
color: #bfbfbb;
|
||||
padding: 0.2em 1em;
|
||||
border-radius: 3px;
|
||||
border-left: solid #4974a9 5px;
|
||||
font-weight: 100;
|
||||
}
|
||||
|
||||
h2 {
|
||||
font-size: 1.1rem;
|
||||
margin-top: 1rem;
|
||||
background: #2f3136;
|
||||
color: #bfbfbb;
|
||||
padding: 0.4em 1.8em;
|
||||
border-radius: 3px;
|
||||
border-left: solid #4974a9 5px;
|
||||
font-weight: 100;
|
||||
}
|
||||
|
||||
table{
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
label{
|
||||
line-height: 46px;
|
||||
}
|
||||
|
||||
input{
|
||||
line-height: 46px;
|
||||
}
|
||||
|
||||
.left {
|
||||
float: left;
|
||||
}
|
||||
.right {
|
||||
float: right;
|
||||
}
|
||||
.bold {
|
||||
font-weight: bold;
|
||||
}
|
||||
.red{
|
||||
color: #F04747;
|
||||
}
|
||||
.green{
|
||||
color:#43B581;
|
||||
}
|
||||
.clear {
|
||||
clear: both;
|
||||
}
|
||||
.centered{
|
||||
text-align: center;
|
||||
}
|
||||
.select{
|
||||
width: 98px !important;
|
||||
padding: 0 !important;
|
||||
}
|
||||
.selected{
|
||||
background: #4974a9;
|
||||
}
|
||||
.status{
|
||||
width: 120px;
|
||||
padding-left: 8px;
|
||||
}
|
||||
.labelFix {
|
||||
line-height: 40px;
|
||||
}
|
||||
.clickable{
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
#error {
|
||||
text-align: center;
|
||||
color: #fff;
|
||||
background: #af3535;
|
||||
border-radius: 5px;
|
||||
padding: 10px;
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
#closeError{
|
||||
float: right;
|
||||
color: #fff;
|
||||
padding: 0px 10px;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
#copyright{
|
||||
font-size: 0.95em;
|
||||
text-align: center;
|
||||
margin-top: 3em;
|
||||
margin-bottom: 3em;
|
||||
}
|
||||
|
||||
/* CHECKBOX */
|
||||
/* Customize the label (the container) */
|
||||
.checkBoxContainer {
|
||||
display: block;
|
||||
position: relative;
|
||||
padding-left: 35px;
|
||||
margin-bottom: 12px;
|
||||
cursor: pointer;
|
||||
font-size: 22px;
|
||||
-webkit-user-select: none;
|
||||
-moz-user-select: none;
|
||||
-ms-user-select: none;
|
||||
user-select: none;
|
||||
height: 32px;
|
||||
width: 32px;
|
||||
}
|
||||
|
||||
/* Hide the browser's default checkbox */
|
||||
.checkBoxContainer input {
|
||||
position: absolute;
|
||||
opacity: 0;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
/* Create a custom checkbox */
|
||||
.checkmark {
|
||||
position: absolute;
|
||||
top: 8px;
|
||||
left: 0;
|
||||
height: 28px;
|
||||
width: 28px;
|
||||
background-color: #2F3136;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
/* When the checkbox is checked, add a blue background */
|
||||
.checkBoxContainer input:checked ~ .checkmark {
|
||||
background-color: #4974A9;
|
||||
}
|
||||
|
||||
/* Create the checkmark/indicator (hidden when not checked) */
|
||||
.checkmark:after {
|
||||
content: "";
|
||||
position: absolute;
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* Show the checkmark when checked */
|
||||
.checkBoxContainer input:checked ~ .checkmark:after {
|
||||
display: block;
|
||||
}
|
||||
|
||||
/* Style the checkmark/indicator */
|
||||
.checkBoxContainer .checkmark:after {
|
||||
left: 10px;
|
||||
top: 7px;
|
||||
width: 4px;
|
||||
height: 10px;
|
||||
border: solid white;
|
||||
border-width: 0 3px 3px 0;
|
||||
-webkit-transform: rotate(45deg);
|
||||
-ms-transform: rotate(45deg);
|
||||
transform: rotate(45deg);
|
||||
}
|
||||
|
||||
/* ERROR */
|
||||
.hide {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.show {
|
||||
display: block !important;
|
||||
animation-name: fadeIn;
|
||||
animation-duration: 1s;
|
||||
}
|
||||
|
||||
@keyframes fadeIn {
|
||||
0% {opacity: 0;}
|
||||
100% {opacity: 1;}
|
||||
}
|
||||
|
||||
|
||||
hr {
|
||||
background: #3e4146;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #5281bb;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
color: #95b8e4;
|
||||
}
|
||||
|
||||
li{
|
||||
margin: 4px 0;
|
||||
}
|
||||
|
||||
/* Meter */
|
||||
.meter_background{
|
||||
background: #42464D;
|
||||
width: 100%;
|
||||
min-width: 90px;
|
||||
}
|
||||
.meter_forground{
|
||||
color: #fff;
|
||||
padding: 4px 0;
|
||||
/* + one of the colors below
|
||||
(width will be set by the JS) */
|
||||
}
|
||||
.meter_green{
|
||||
background: #43B581;
|
||||
}
|
||||
.meter_orange{
|
||||
background: #FAA61A;
|
||||
}
|
||||
.meter_red{
|
||||
background: #F04747;
|
||||
}
|
||||
.meter_value{
|
||||
padding-left: 8px;
|
||||
}
|
||||
|
||||
|
||||
/* Tables */
|
||||
table {
|
||||
width: 100%;
|
||||
margin-bottom: 50px;
|
||||
}
|
||||
|
||||
th, td {
|
||||
padding: 10px 6px;
|
||||
text-align: left;
|
||||
border-bottom: 1px solid #5d5d5d;
|
||||
}
|
||||
|
||||
|
||||
/* Navigation bar */
|
||||
nav {
|
||||
display: block;
|
||||
padding: 8px 10px;
|
||||
background: #2f3136;
|
||||
}
|
||||
|
||||
nav a {
|
||||
color: #bfbfbf;
|
||||
padding: 0.5em;
|
||||
display: inline-block;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
nav a:hover{
|
||||
background: #36393f;
|
||||
color:#cecece;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
/* Inputs and buttons */
|
||||
.upload-script, .button, button, input[type="submit"], input[type="reset"], input[type="button"] {
|
||||
display: inline-block;
|
||||
height: 38px;
|
||||
padding: 0 25px;
|
||||
color:#fff;
|
||||
text-align: center;
|
||||
font-size: 11px;
|
||||
font-weight: 600;
|
||||
line-height: 38px;
|
||||
letter-spacing: .1rem;
|
||||
text-transform: uppercase;
|
||||
text-decoration: none;
|
||||
white-space: nowrap;
|
||||
background: #2f3136;
|
||||
border-radius: 4px;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
button:hover, input[type="submit"]:hover, input[type="reset"]:hover, input[type="button"]:hover {
|
||||
background: #42444a;
|
||||
}
|
||||
|
||||
/* Forms */
|
||||
input[type="email"], input[type="number"], input[type="search"], input[type="text"], input[type="tel"], input[type="url"], input[type="password"], textarea, select {
|
||||
height: 38px;
|
||||
padding: 6px 10px; /* The 6px vertically centers text on FF, ignored by Webkit */
|
||||
background-color: #2f3136;
|
||||
border-radius: 4px;
|
||||
box-shadow: none;
|
||||
box-sizing: border-box;
|
||||
color: #d4d4d4;
|
||||
border: none;
|
||||
}
|
||||
|
||||
.setting {
|
||||
width: 100% !important;
|
||||
max-width: 284px !important;
|
||||
}
|
||||
|
||||
input[type="file"] {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* ==== GRID SYSTEM ==== */
|
||||
.container {
|
||||
width: 100%;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
max-width: 1140px;
|
||||
}
|
||||
|
||||
.row {
|
||||
position: relative;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.row [class^="col"] {
|
||||
float: left;
|
||||
margin: 0.25rem 2%;
|
||||
min-height: 0.125rem;
|
||||
}
|
||||
|
||||
.col-1,
|
||||
.col-2,
|
||||
.col-3,
|
||||
.col-4,
|
||||
.col-5,
|
||||
.col-6,
|
||||
.col-7,
|
||||
.col-8,
|
||||
.col-9,
|
||||
.col-10,
|
||||
.col-11,
|
||||
.col-12 {
|
||||
width: 96%;
|
||||
}
|
||||
|
||||
.row::after {
|
||||
content: "";
|
||||
display: table;
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.hidden-sm {
|
||||
display: none;
|
||||
}
|
||||
|
||||
@media only screen and (min-width: 45em) {
|
||||
.col-1 {
|
||||
width: 4.33%;
|
||||
}
|
||||
|
||||
.col-2 {
|
||||
width: 12.66%;
|
||||
}
|
||||
|
||||
.col-3 {
|
||||
width: 21%;
|
||||
}
|
||||
|
||||
.col-4 {
|
||||
width: 29.33%;
|
||||
}
|
||||
|
||||
.col-5 {
|
||||
width: 37.66%;
|
||||
}
|
||||
|
||||
.col-6 {
|
||||
width: 46%;
|
||||
}
|
||||
|
||||
.col-7 {
|
||||
width: 54.33%;
|
||||
}
|
||||
|
||||
.col-8 {
|
||||
width: 62.66%;
|
||||
}
|
||||
|
||||
.col-9 {
|
||||
width: 71%;
|
||||
}
|
||||
|
||||
.col-10 {
|
||||
width: 79.33%;
|
||||
}
|
||||
|
||||
.col-11 {
|
||||
width: 87.66%;
|
||||
}
|
||||
|
||||
.col-12 {
|
||||
width: 96%;
|
||||
}
|
||||
|
||||
.hidden-sm {
|
||||
display: block;
|
||||
}
|
||||
}
|
||||
13
utils/web_converter_python/README.md
Normal file
13
utils/web_converter_python/README.md
Normal file
@@ -0,0 +1,13 @@
|
||||
Use this converter to minify and gzip everything in the `web_interface` folder and put it in `esp8266_deauther/data/web/`.
|
||||
This script will also generate a new `webfiles.h` file and replace the old in `esp8266_deauther`.
|
||||
|
||||
Copyright goes to [@xdavidhu](http://github.com/xdavidhu/).
|
||||
|
||||
**A few notes:**
|
||||
- you need python3 to run this script
|
||||
- you need to install the anglerfish package: `sudo python3 -m pip install anglerfish`
|
||||
- be sure to run the script from its current position
|
||||
- `.lang` files will always go in the `/lang` folder
|
||||
- `.js` files will always go into the `/js` folder
|
||||
- `.json` files will be ignored and not copied
|
||||
- only `.html` and `.css` will be minified before being gzipped (minifying JS can cause problems)
|
||||
31
utils/web_converter_python/css_html_js_minify/__init__.py
Normal file
31
utils/web_converter_python/css_html_js_minify/__init__.py
Normal file
@@ -0,0 +1,31 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Created by: juancarlospaco
|
||||
# GitHub Repo: https://github.com/juancarlospaco/css-html-js-minify
|
||||
|
||||
|
||||
"""CSS-HTML-JS-Minify.
|
||||
|
||||
Minifier for the Web.
|
||||
"""
|
||||
|
||||
|
||||
from .minify import (process_single_html_file, process_single_js_file,
|
||||
process_single_css_file, html_minify, js_minify,
|
||||
css_minify)
|
||||
|
||||
|
||||
__version__ = '2.5.0'
|
||||
__license__ = 'GPLv3+ LGPLv3+'
|
||||
__author__ = 'Juan Carlos'
|
||||
__email__ = 'juancarlospaco@gmail.com'
|
||||
__url__ = 'https://github.com/juancarlospaco/css-html-js-minify'
|
||||
__source__ = ('https://raw.githubusercontent.com/juancarlospaco/'
|
||||
'css-html-js-minify/master/css-html-js-minify.py')
|
||||
|
||||
|
||||
__all__ = ['__version__', '__license__', '__author__',
|
||||
'__email__', '__url__', '__source__',
|
||||
'process_single_html_file', 'process_single_js_file',
|
||||
'process_single_css_file', 'html_minify', 'js_minify',
|
||||
'css_minify', 'minify']
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
344
utils/web_converter_python/css_html_js_minify/css_minifier.py
Normal file
344
utils/web_converter_python/css_html_js_minify/css_minifier.py
Normal file
@@ -0,0 +1,344 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Created by: juancarlospaco
|
||||
# GitHub Repo: https://github.com/juancarlospaco/css-html-js-minify
|
||||
|
||||
"""CSS Minifier functions for CSS-HTML-JS-Minify."""
|
||||
|
||||
|
||||
import re
|
||||
import itertools
|
||||
|
||||
import logging as log
|
||||
|
||||
from .variables import EXTENDED_NAMED_COLORS, CSS_PROPS_TEXT
|
||||
|
||||
|
||||
__all__ = ['css_minify', 'condense_semicolons']
|
||||
|
||||
|
||||
def _compile_props(props_text, grouped=False):
    """Expand the raw CSS property table into a flat, prefixed list.

    Every property name gets each vendor prefix (plus the bare name),
    comment lines are dropped.  Returns (props, group_ids): with
    grouped=False the ids are all zero; with grouped=True blank table
    entries separate consecutive group ids.
    """
    vendor_prefixes = "-webkit-,-khtml-,-epub-,-moz-,-ms-,-o-,".split(",")
    expanded = []
    for table_line in props_text.strip().lower().splitlines():
        for name in table_line.split(" "):
            for prefix in vendor_prefixes:
                expanded.append(prefix + name)
    no_comments = [p for p in expanded if not p.startswith('#')]
    if not grouped:
        flat = [p for p in no_comments if p]
        return flat, [0] * len(flat)
    grouped_props, group_ids, current_group = [], [], 0
    for prop in no_comments:
        if prop.strip():
            grouped_props.append(prop)
            group_ids.append(current_group)
        else:
            current_group += 1
    return grouped_props, group_ids
|
||||
|
||||
|
||||
def _prioritify(line_of_css, css_props_text_as_list):
    """Return (priority, group) for one CSS declaration line.

    Lower priority sorts earlier; a property not present in the table
    falls back to (9999, 0).
    """
    known_props, prop_groups = css_props_text_as_list
    declared = line_of_css.split(":")[0].lower().strip()
    for position, candidate in enumerate(known_props):
        if candidate.lower() == declared:
            return position, prop_groups[position]
    return 9999, 0
|
||||
|
||||
|
||||
def _props_grouper(props, pgs):
    """Return groups for properties.

    Sorts the declaration lines by their group id (from _prioritify),
    then by priority inside each group, and inserts a '\n' separator
    between groups.  Returns the reordered list of lines.
    """
    if not props:
        return props
    # props = sorted([
    #     _ if _.strip().endswith(";")
    #     and not _.strip().endswith("*/") and not _.strip().endswith("/*")
    #     else _.rstrip() + ";\n" for _ in props])
    # Pair each line with its (priority, group) tuple.
    props_pg = zip(map(lambda prop: _prioritify(prop, pgs), props), props)
    # Order by group id first so groupby() sees contiguous groups.
    props_pg = sorted(props_pg, key=lambda item: item[0][1])
    props_by_groups = map(
        lambda item: list(item[1]),
        itertools.groupby(props_pg, key=lambda item: item[0][1]))
    # Within each group, order by priority.
    props_by_groups = map(lambda item: sorted(
        item, key=lambda item: item[0][0]), props_by_groups)
    props = []
    for group in props_by_groups:
        group = map(lambda item: item[1], group)
        props += group
        props += ['\n']
    # Drop the separator appended after the last group.
    props.pop()
    return props
|
||||
|
||||
|
||||
def sort_properties(css_unsorted_string):
    """CSS Property Sorter Function.

    This function will read buffer argument, split it to a list by lines,
    sort it by defined rule, and return sorted buffer if it's CSS property.
    This function depends on '_prioritify' function.
    """
    log.debug("Alphabetically Sorting all CSS / SCSS Properties.")
    css_pgs = _compile_props(CSS_PROPS_TEXT, grouped=False)  # Do Not Group.
    # Split the sheet into (selector-open, declarations, close, remainder).
    pattern = re.compile(r'(.*?{\r?\n?)(.*?)(}.*?)|(.*)',
                         re.DOTALL + re.MULTILINE)
    matched_patterns = pattern.findall(css_unsorted_string)
    sorted_patterns, sorted_buffer = [], css_unsorted_string
    # One declaration per match: everything up to and including ';' + newline.
    re_prop = re.compile(r'((?:.*?)(?:;)(?:.*?\n)|(?:.*))',
                         re.DOTALL + re.MULTILINE)
    if len(matched_patterns) != 0:
        for matched_groups in matched_patterns:
            sorted_patterns += matched_groups[0].splitlines(True)
            # Extract, clean and reorder the declarations of this rule.
            props = map(lambda line: line.lstrip('\n'),
                        re_prop.findall(matched_groups[1]))
            props = list(filter(lambda line: line.strip('\n '), props))
            props = _props_grouper(props, css_pgs)
            sorted_patterns += props
            sorted_patterns += matched_groups[2].splitlines(True)
            sorted_patterns += matched_groups[3].splitlines(True)
        sorted_buffer = ''.join(sorted_patterns)
    return sorted_buffer
|
||||
|
||||
|
||||
def remove_comments(css):
    """Remove all CSS comment blocks.

    Keeps comments of the form `/*!...*/` and the IE-Mac comment hack
    pairs (`/*\*/ ... /**/`); everything else between `/*` and `*/` is
    stripped.  Returns the cleaned CSS string.
    """
    log.debug("Removing all Comments.")
    iemac, preserve = False, False
    comment_start = css.find("/*")
    while comment_start >= 0:  # Preserve comments that look like `/*!...*/`.
        # Slicing is used to make sure we dont get an IndexError.
        preserve = css[comment_start + 2:comment_start + 3] == "!"
        comment_end = css.find("*/", comment_start + 2)
        if comment_end < 0:
            # Unterminated comment: drop everything from its start (unless
            # it is a preserved `/*!` comment).
            if not preserve:
                css = css[:comment_start]
                break
        elif comment_end >= (comment_start + 2):
            if css[comment_end - 1] == "\\":
                # This is an IE Mac-specific comment; leave this one and the
                # following one alone.
                comment_start = comment_end + 2
                iemac = True
            elif iemac:
                comment_start = comment_end + 2
                iemac = False
            elif not preserve:
                css = css[:comment_start] + css[comment_end + 2:]
            else:
                comment_start = comment_end + 2
        comment_start = css.find("/*", comment_start)
    return css
|
||||
|
||||
|
||||
def remove_unnecessary_whitespace(css):
    """Remove unnecessary whitespace characters.

    Collapses whitespace around punctuation while protecting pseudo-class
    selectors like `p :link` from being glued together, and normalises
    any `@charset` declaration to a single one at the start.
    """
    log.debug("Removing all unnecessary white spaces.")

    def pseudoclasscolon(css):
        """Prevent 'p :link' from becoming 'p:link'.

        Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'.
        This is translated back again later.
        """
        regex = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)")
        match = regex.search(css)
        while match:
            css = ''.join([
                css[:match.start()],
                match.group().replace(":", "___PSEUDOCLASSCOLON___"),
                css[match.end():]])
            match = regex.search(css)
        return css

    css = pseudoclasscolon(css)
    # Remove spaces from before things.
    css = re.sub(r"\s+([!{};:>\(\)\],])", r"\1", css)
    # If there is a `@charset`, then only allow one, and move to beginning.
    css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css)
    css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css)
    # Put the space back in for a few cases, such as `@media screen` and
    # `(-webkit-min-device-pixel-ratio:0)`.
    css = re.sub(r"\band\(", "and (", css)
    # Put the colons back.
    css = css.replace('___PSEUDOCLASSCOLON___', ':')
    # Remove spaces from after things.
    css = re.sub(r"([!{}:;>\(\[,])\s+", r"\1", css)
    return css
|
||||
|
||||
|
||||
def remove_unnecessary_semicolons(css):
    """Drop semicolons that sit directly before a closing brace."""
    log.debug("Removing all unnecessary semicolons.")
    trailing_semis = re.compile(r";+\}")
    return trailing_semis.sub("}", css)
|
||||
|
||||
|
||||
def remove_empty_rules(css):
    """Strip out rules whose declaration block is completely empty."""
    log.debug("Removing all unnecessary empty rules.")
    empty_rule = re.compile(r"[^\}\{]+\{\}")
    return empty_rule.sub("", css)
|
||||
|
||||
|
||||
def normalize_rgb_colors_to_hex(css):
    """Convert `rgb(51,102,153)` notation to `#336699` hex notation."""
    log.debug("Converting all rgba to hexadecimal color values.")
    rgb_re = re.compile(r"rgb\s*\(\s*([0-9,\s]+)\s*\)")
    found = rgb_re.search(css)
    while found:
        channels = [int(part.strip()) for part in found.group(1).split(",")]
        css = css.replace(found.group(), '#%.2x%.2x%.2x' % tuple(channels))
        found = rgb_re.search(css)
    return css
|
||||
|
||||
|
||||
def condense_zero_units(css):
    """Replace `0(px, em, %, etc)` with a bare `0`."""
    log.debug("Condensing all zeroes on values.")
    # Only zeroes preceded by whitespace or ':' are condensed, so the
    # trailing '0' of values like '10px' is left alone.
    zero_unit = (r"([\s:])(0)(px|em|%|in|q|ch|cm|mm|pc|pt|ex|rem|s|ms|"
                 r"deg|grad|rad|turn|vw|vh|vmin|vmax|fr)")
    return re.sub(zero_unit, r"\1\2", css)
|
||||
|
||||
|
||||
def condense_multidimensional_zeros(css):
    """Replace `:0 0 0 0;`, `:0 0 0;` etc. with `:0;`."""
    log.debug("Condensing all multidimensional zeroes on values.")
    for verbose, condensed in ((":0 0 0 0;", ":0;"),
                               (":0 0 0;", ":0;"),
                               (":0 0;", ":0;")):
        css = css.replace(verbose, condensed)
    # These two properties need two values, so undo the condensing for them.
    css = css.replace("background-position:0;", "background-position:0 0;")
    return css.replace("transform-origin:0;", "transform-origin:0 0;")
|
||||
|
||||
|
||||
def condense_floating_points(css):
    """Replace `0.6` with `.6` where possible."""
    log.debug("Condensing all floating point values.")
    leading_zero = re.compile(r"(:|\s)0+\.(\d+)")
    return leading_zero.sub(r"\1.\2", css)
|
||||
|
||||
|
||||
def condense_hex_colors(css):
    """Shorten colors from #AABBCC to #ABC where possible."""
    log.debug("Condensing all hexadecimal color values.")
    hex6 = re.compile(
        r"""([^\"'=\s])(\s*)#([0-9a-f])([0-9a-f])([0-9a-f])"""
        r"""([0-9a-f])([0-9a-f])([0-9a-f])""", re.I | re.S)
    found = hex6.search(css)
    while found:
        odd = found.group(3) + found.group(5) + found.group(7)
        even = found.group(4) + found.group(6) + found.group(8)
        if odd.lower() == even.lower():
            # Digit pairs match: collapse #AABBCC into #ABC, then rescan
            # just before the shortened color.
            shortened = found.group(1) + found.group(2) + '#' + odd
            css = css.replace(found.group(), shortened)
            found = hex6.search(css, found.end() - 3)
        else:
            found = hex6.search(css, found.end())
    return css
|
||||
|
||||
|
||||
def condense_whitespace(css):
    """Condense every run of adjacent whitespace into one single space."""
    log.debug("Condensing all unnecessary white spaces.")
    whitespace_run = re.compile(r"\s+")
    return whitespace_run.sub(" ", css)
|
||||
|
||||
|
||||
def condense_semicolons(css):
    """Condense runs of adjacent semicolons into a single one."""
    log.debug("Condensing all unnecessary multiple adjacent semicolons.")
    semicolon_run = re.compile(r";;+")
    return semicolon_run.sub(";", css)
|
||||
|
||||
|
||||
def wrap_css_lines(css, line_length=80):
    """Wrap the lines of the given CSS to an approximate length.

    Lines are only broken right after a ``}`` (always a safe split point
    in CSS), so individual lines may exceed *line_length* slightly.

    css -- CSS text to wrap.
    line_length -- approximate maximum characters per output line.
    Returns the wrapped CSS string.
    """
    # Fixed typo in the debug message: "lenght" -> "length".
    log.debug("Wrapping lines to ~{0} max line length.".format(line_length))
    lines, line_start = [], 0
    for i, char in enumerate(css):
        # It is safe to break after } characters.
        if char == '}' and (i - line_start >= line_length):
            lines.append(css[line_start:i + 1])
            line_start = i + 1
    if line_start < len(css):
        lines.append(css[line_start:])
    return '\n'.join(lines)
|
||||
|
||||
|
||||
def condense_font_weight(css):
    """Condense named font weights into their shorter numeric equals."""
    log.debug("Condensing font weights on values.")
    css = css.replace('font-weight:normal;', 'font-weight:400;')
    return css.replace('font-weight:bold;', 'font-weight:700;')
|
||||
|
||||
|
||||
def condense_std_named_colors(css):
    """Condense named color values to shorter replacement using HEX."""
    log.debug("Condensing standard named color values.")
    named_to_hex = {':aqua;': ':#0ff;', ':blue;': ':#00f;',
                    ':fuchsia;': ':#f0f;', ':yellow;': ':#ff0;'}
    for color_name, color_hexa in named_to_hex.items():
        css = css.replace(color_name, color_hexa)
    return css
|
||||
|
||||
|
||||
def condense_xtra_named_colors(css):
    """Condense extended named color values to shorter rgb() notation.

    Each name in EXTENDED_NAMED_COLORS is swapped for its rgb() triplet,
    but only when the rgb() form is actually shorter than the name.
    """
    log.debug("Condensing extended named color values.")
    for name, rgb in tuple(EXTENDED_NAMED_COLORS.items()):
        rgb_form = 'rgb({0},{1},{2})'.format(rgb[0], rgb[1], rgb[2])
        if len(name) > len(rgb_form):
            css = css.replace(name, rgb_form)
    return css
|
||||
|
||||
|
||||
def remove_url_quotes(css):
    """Drop matching quotes inside url(), which does not need them."""
    log.debug("Removing quotes from url.")
    quoted_url = re.compile(r'url\((["\'])([^)]*)\1\)')
    return quoted_url.sub(r'url(\2)', css)
|
||||
|
||||
|
||||
def condense_border_none(css):
    """Condense border:none; down to the shorter border:0;."""
    log.debug("Condense borders values.")
    return css.replace("border:none;", "border:0;")
|
||||
|
||||
|
||||
def add_encoding(css):
    """Prepend a charset declaration when none is present.

    NOTE(review): the emitted form is unquoted (`@charset utf-8;`); the
    CSS spec expects a quoted string (`@charset "utf-8";`) -- confirm
    downstream consumers before changing it.
    """
    log.debug("Adding encoding declaration if needed.")
    if "@charset" in css.lower():
        return css
    return "@charset utf-8;" + css
|
||||
|
||||
|
||||
def restore_needed_space(css):
    """Put back the few spaces that minification must not remove."""
    css = css.replace("!important", " !important")  # space before !important
    css = css.replace("@media(", "@media (")  # media queries need a space
    # jpeg > jpg in embedded data URIs, then trim trailing newlines/semis.
    css = css.replace("data:image/jpeg;base64,", "data:image/jpg;base64,")
    return css.rstrip("\n;")
|
||||
|
||||
|
||||
def unquote_selectors(css):
    """Drop quotes inside attribute selectors where they are not needed."""
    log.debug("Removing unnecessary Quotes on selectors of CSS classes.")
    quoted_attr = re.compile(r'([a-zA-Z]+)="([a-zA-Z0-9-_\.]+)"]')
    return quoted_attr.sub(r'\1=\2]', css)
|
||||
|
||||
|
||||
def css_minify(css, wrap=False, comments=False, sort=False, noprefix=False):
    """Minify CSS main function.

    Runs the full pipeline of minification passes over *css*.

    css -- CSS source text to compress.
    wrap -- wrap output lines to ~80 characters when True.
    comments -- keep comments when True (skips comment removal).
    sort -- alphabetically sort declarations when True.
    noprefix -- skip prepending the @charset declaration when True.
    Returns the minified CSS string.

    NOTE: the pass order below is deliberate (e.g. whitespace condensing
    runs before passes whose patterns contain no spaces) -- do not reorder.
    """
    log.info("Compressing CSS...")
    css = remove_comments(css) if not comments else css
    css = sort_properties(css) if sort else css
    css = unquote_selectors(css)
    css = condense_whitespace(css)
    css = remove_url_quotes(css)
    css = condense_xtra_named_colors(css)
    css = condense_font_weight(css)
    css = remove_unnecessary_whitespace(css)
    css = condense_std_named_colors(css)
    css = remove_unnecessary_semicolons(css)
    css = condense_zero_units(css)
    css = condense_multidimensional_zeros(css)
    css = condense_floating_points(css)
    css = normalize_rgb_colors_to_hex(css)
    css = condense_hex_colors(css)
    css = condense_border_none(css)
    css = wrap_css_lines(css, 80) if wrap else css
    css = condense_semicolons(css)
    css = add_encoding(css) if not noprefix else css
    css = restore_needed_space(css)
    log.info("Finished compressing CSS !.")
    return css.strip()
|
||||
157
utils/web_converter_python/css_html_js_minify/html_minifier.py
Normal file
157
utils/web_converter_python/css_html_js_minify/html_minifier.py
Normal file
@@ -0,0 +1,157 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Created by: juancarlospaco
|
||||
# GitHub Repo: https://github.com/juancarlospaco/css-html-js-minify
|
||||
|
||||
"""HTML Minifier functions for CSS-HTML-JS-Minify."""
|
||||
|
||||
|
||||
import re
|
||||
|
||||
import logging as log
|
||||
|
||||
|
||||
__all__ = ['html_minify']
|
||||
|
||||
|
||||
def condense_html_whitespace(html):
    """Condense HTML whitespace, preserving <pre> and <textarea> content.

    >>> condense_html_whitespace('<i>  <b>x</b>  </i>')
    '<i> <b>x</b> </i>'
    """  # one space between tags is kept; runs and newlines are collapsed.
    log.debug("Removing unnecessary HTML White Spaces and Empty New Lines.")
    open_tags = []
    pieces = re.split(
        '(<\\s*pre.*>|<\\s*/\\s*pre\\s*>|<\\s*textarea.*>|<\\s*/\\s*textarea\\s*>)',
        html, flags=re.IGNORECASE)
    for index, piece in enumerate(pieces):
        if index % 2:  # odd indexes hold the pre/textarea delimiter tags
            tag = rawtag(piece)
            if tag.startswith('/'):
                if not open_tags or '/' + open_tags.pop() != tag:
                    raise Exception("Some tag is not closed properly")
            else:
                open_tags.append(tag)
        elif not open_tags:  # only condense outside any <pre>/<textarea>
            collapsed = re.sub(r'>\s+<', '> <', piece)
            pieces[index] = re.sub(r'\s{2,}|[\r\n]', ' ', collapsed)
    return ''.join(pieces)
|
||||
|
||||
|
||||
def rawtag(markup):
    """Classify a pre/textarea tag string into a canonical token.

    markup -- a single opening/closing <pre> or <textarea> tag string
              (as produced by the split in condense_html_whitespace).
    Returns 'pre', 'txt', '/pre' or '/txt'; None for anything else.
    (Parameter renamed from `str`, which shadowed the builtin; callers
    in this file pass it positionally.)
    """
    if re.match('<\\s*pre.*>', markup, flags=re.IGNORECASE):
        return 'pre'
    if re.match('<\\s*textarea.*>', markup, flags=re.IGNORECASE):
        return 'txt'
    if re.match('<\\s*/\\s*pre\\s*>', markup, flags=re.IGNORECASE):
        return '/pre'
    if re.match('<\\s*/\\s*textarea\\s*>', markup, flags=re.IGNORECASE):
        return '/txt'
    return None  # not a recognized pre/textarea tag
|
||||
|
||||
def condense_style(html):
    """Condense style html tags.

    >>> condense_style('<style type="text/css">*{border:0}</style><p>a b c')
    '<style>*{border:0}</style><p>a b c'
    """  # May look silly but Emmet does this and is wrong.
    log.debug("Condensing HTML Style CSS tags.")
    for verbose_tag in ('<style type="text/css">',
                        "<style type='text/css'>",
                        "<style type=text/css>"):
        html = html.replace(verbose_tag, '<style>')
    return html
|
||||
|
||||
|
||||
def condense_script(html):
    """Condense script html tags.

    >>> condense_script('<script type="text/javascript"> </script><p>a b c')
    '<script> </script><p>a b c'
    """  # May look silly but Emmet does this and is wrong.
    log.debug("Condensing HTML Script JS tags.")
    # Bug fix: the single-quoted and unquoted variants previously looked
    # for "<style type=...javascript>" instead of "<script ...>", so
    # those forms were never condensed.
    return html.replace('<script type="text/javascript">', '<script>').replace(
        "<script type='text/javascript'>", '<script>').replace(
            "<script type=text/javascript>", '<script>')
|
||||
|
||||
|
||||
def clean_unneeded_html_tags(html):
    """Clean unneeded optional html tags.

    >>> clean_unneeded_html_tags('a<body></img></td>b</th></tr></hr></br>c')
    'abc'
    """
    log.debug("Removing unnecessary optional HTML tags.")
    removable_tags = """</area> </base> <body> </body> </br> </col>
        </colgroup> </dd> </dt> <head> </head> </hr> <html> </html> </img>
        </input> </li> </link> </meta> </option> </param> <tbody> </tbody>
        </td> </tfoot> </th> </thead> </tr> </basefont> </isindex> </param>
        """.split()
    # May look silly but Emmet does this and is wrong.
    for tag_to_remove in removable_tags:
        html = html.replace(tag_to_remove, '')
    return html
|
||||
|
||||
|
||||
def remove_html_comments(html):
    """Remove all HTML comments, Keep all for Grunt, Grymt and IE.

    >>> _="<!-- build:dev -->a<!-- endbuild -->b<!--[if IE 7]>c<![endif]--> "
    >>> _+= "<!-- kill me please -->keep" ; remove_html_comments(_)
    '<!-- build:dev -->a<!-- endbuild -->b<!--[if IE 7]>c<![endif]--> keep'
    """  # Grunt uses comments to as build arguments, bad practice but still.
    log.debug("""Removing all unnecessary HTML comments; Keep all containing:
    'build:', 'endbuild', '<!--[if]>', '<![endif]-->' for Grunt/Grymt, IE.""")
    # Bug fix: `[^(build|endbuild)]` was a character class, so ANY comment
    # whose first letter was one of those characters (e.g. "<!-- note -->",
    # "<!-- end -->") was wrongly kept. A negative lookahead expresses the
    # real intent. IE conditionals like <!--[if IE]> still never match
    # because they lack the space after "<!--".
    return re.compile(r'<!-- (?!(?:build|endbuild)).*? -->', re.I).sub('', html)
|
||||
|
||||
|
||||
def unquote_html_attributes(html):
    """Remove all HTML quotes on attibutes if possible.

    >>> unquote_html_attributes('<img width="9" height="5" data-foo="0" >')
    '<img width=9 height=5 data-foo=0 >'

    NOTE(review): space4 and quotes_in_tag use non-raw strings containing
    `\\s` / `\\.` escapes -- they work, but raw strings would silence
    invalid-escape warnings on newer Pythons.
    """  # data-foo=0> might cause errors on IE, we leave 1 space data-foo=0 >
    log.debug("Removing unnecessary Quotes on attributes of HTML tags.")
    # cache all regular expressions on variables before we enter the for loop.
    any_tag = re.compile(r"<\w.*?>", re.I | re.MULTILINE | re.DOTALL)
    space = re.compile(r' \s+|\s +', re.MULTILINE)  # generic whitespace runs
    space1 = re.compile(r'\w\s+\w', re.MULTILINE)  # gap between two words
    space2 = re.compile(r'"\s+>', re.MULTILINE)  # gap between quote and '>'
    space3 = re.compile(r"'\s+>", re.MULTILINE)  # same, single-quoted
    space4 = re.compile('"\s\s+\w+="|\'\s\s+\w+=\'|"\s\s+\w+=|\'\s\s+\w+=',
                        re.MULTILINE)  # 2+ spaces before the next attribute
    space6 = re.compile(r"\d\s+>", re.MULTILINE)  # gap between digit and '>'
    quotes_in_tag = re.compile('([a-zA-Z]+)="([a-zA-Z0-9-_\.]+)"')
    # iterate on a for loop cleaning stuff up on the html markup.
    for tag in iter(any_tag.findall(html)):
        # exceptions of comments and closing tags
        if tag.startswith('<!') or tag.find('</') > -1:
            continue
        original = tag
        # remove white space inside the tag itself
        tag = space2.sub('" >', tag)  # preserve 1 white space is safer
        tag = space3.sub("' >", tag)
        for each in space1.findall(tag) + space6.findall(tag):
            tag = tag.replace(each, space.sub(' ', each))
        for each in space4.findall(tag):
            tag = tag.replace(each, each[0] + ' ' + each[1:].lstrip())
        # remove quotes on some attributes
        tag = quotes_in_tag.sub(r'\1=\2 ', tag)  # See Bug #28
        if original != tag:  # has the tag been improved ?
            html = html.replace(original, tag)
    return html.strip()
|
||||
|
||||
|
||||
def html_minify(html, comments=False):
    """Minify HTML main function.

    html -- HTML source text to compress.
    comments -- keep HTML comments when True (skips comment removal).
    Returns the minified HTML string.

    >>> html_minify(' <p width="9" height="5" > <!-- a --> b </p> c <br> ')
    '<p width=9 height=5 > b c <br>'
    """
    log.info("Compressing HTML...")
    # Comments are stripped first (unless asked to keep them), then tags
    # are condensed, whitespace collapsed and attribute quotes removed.
    html = remove_html_comments(html) if not comments else html
    html = condense_style(html)
    html = condense_script(html)
    html = clean_unneeded_html_tags(html)
    html = condense_html_whitespace(html)
    html = unquote_html_attributes(html)
    log.info("Finished compressing HTML !.")
    return html.strip()
|
||||
184
utils/web_converter_python/css_html_js_minify/js_minifier.py
Normal file
184
utils/web_converter_python/css_html_js_minify/js_minifier.py
Normal file
@@ -0,0 +1,184 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Created by: juancarlospaco
|
||||
# GitHub Repo: https://github.com/juancarlospaco/css-html-js-minify
|
||||
# Modified by: xdavidhu
|
||||
|
||||
"""JavaScript Minifier functions for CSS-HTML-JS-Minify."""
|
||||
|
||||
|
||||
from io import StringIO # pure-Python StringIO supports unicode.
|
||||
|
||||
import logging as log
|
||||
import re
|
||||
|
||||
from .css_minifier import condense_semicolons
|
||||
|
||||
|
||||
__all__ = ['js_minify']
|
||||
|
||||
|
||||
def remove_commented_lines(js):
    """Force remove commented out lines from Javascript."""
    log.debug("Force remove commented out lines from Javascript.")
    stripped = []
    for line in js.splitlines():
        line = re.sub(r"/\*.*\*/", "", line)  # single-line /* COMMENT */
        line = re.sub(r"//.*", "", line)  # // COMMENT to end of line
        stripped.append(line)
    # Every line (including the first) is emitted with a leading newline,
    # so the result always starts with '\n'.
    return ''.join('\n' + line for line in stripped)
|
||||
|
||||
|
||||
def simple_replacer_js(js):
    """Force strip simple replacements from Javascript."""
    log.debug("Force strip simple replacements from Javascript.")
    js = js.replace("debugger;", ";")
    js = js.replace(";}", "}").replace("; ", ";").replace(" ;", ";")
    return condense_semicolons(js.rstrip("\n;"))
|
||||
|
||||
|
||||
def js_minify_keep_comments(js):
    """Return a minified version of the Javascript string."""
    log.info("Compressing Javascript...")
    source, sink = StringIO(js), StringIO()
    JavascriptMinify(source, sink).minify()
    return force_single_line_js(sink.getvalue())
|
||||
|
||||
|
||||
def force_single_line_js(js):
    """Force Javascript to a single line, even if need to add semicolon."""
    log.debug("Forcing JS from ~{0} to 1 Line.".format(len(js.splitlines())))
    lines = js.splitlines()
    return ";".join(lines) if len(lines) > 1 else js
|
||||
|
||||
|
||||
class JavascriptMinify(object):

    """Minify an input stream of Javascript, writing to an output stream.

    Single-pass character-level state machine (a jsmin port): tracks
    whether it is inside a quote, a regex literal, or a comment, and
    drops whitespace/comments that are not syntactically significant.
    """

    def __init__(self, instream=None, outstream=None):
        """Init class with optional input/output streams (file-like)."""
        self.ins, self.outs = instream, outstream

    def minify(self, instream=None, outstream=None):
        """Minify Javascript using StringIO.

        Reads self.ins one character at a time and writes the minified
        result to self.outs. Streams given here override the ones from
        __init__.
        """
        if instream and outstream:
            self.ins, self.outs = instream, outstream
        # Bind the bound methods to locals once; they are called per char.
        write, read = self.outs.write, self.ins.read
        # Characters that may need a separating space kept around them.
        space_strings = ("abcdefghijklmnopqrstuvwxyz"
                         "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\")
        starters, enders = '{[(+-', '}])+-"\''
        newlinestart_strings = starters + space_strings
        newlineend_strings = enders + space_strings
        # Pending output flags and comment/quote/regex state.
        do_newline, do_space = False, False
        doing_single_comment, doing_multi_comment = False, False
        previous_before_comment, in_quote = '', ''
        in_re, quote_buf = False, []
        # Prime the two-character lookahead window.
        previous = read(1)
        next1 = read(1)
        if previous == '/':
            if next1 == '/':
                doing_single_comment = True
            elif next1 == '*':
                doing_multi_comment = True
            else:
                write(previous)
        elif not previous:
            return  # empty input
        elif previous >= '!':
            if previous in "'\"":
                in_quote = previous
            write(previous)
            previous_non_space = previous
        else:
            previous_non_space = ' '
        if not next1:
            return  # single-character input already handled above
        while True:
            next2 = read(1)
            if not next2:
                # End of stream: flush the last pending character.
                last = next1.strip()
                conditional_1 = (doing_single_comment or doing_multi_comment)
                if not conditional_1 and last not in ('', '/'):
                    write(last)
                break
            if doing_multi_comment:
                # Skip until the closing */ of a /* ... */ comment.
                if next1 == '*' and next2 == '/':
                    doing_multi_comment = False
                    next2 = read(1)
            elif doing_single_comment:
                # Skip until end of line of a // comment.
                if next1 in '\r\n':
                    doing_single_comment = False
                    while next2 in '\r\n':
                        next2 = read(1)
                        if not next2:
                            break
                    if previous_before_comment in ')}]':
                        do_newline = True
                    elif previous_before_comment in space_strings:
                        write('\n')
            elif in_quote:
                # Buffer the quoted string and emit it verbatim when the
                # closing (unescaped) quote is reached.
                quote_buf.append(next1)

                if next1 == in_quote:
                    numslashes = 0
                    for c in reversed(quote_buf[:-1]):
                        if c != '\\':
                            break
                        else:
                            numslashes += 1
                    if numslashes % 2 == 0:
                        in_quote = ''
                        write(''.join(quote_buf))
            elif next1 in '\r\n':
                # Decide whether a newline must be preserved here.
                conditional_2 = previous_non_space in newlineend_strings
                if conditional_2 or previous_non_space > '~':
                    while 1:
                        if next2 < '!':
                            next2 = read(1)
                            if not next2:
                                break
                        else:
                            conditional_3 = next2 in newlinestart_strings
                            if conditional_3 or next2 > '~' or next2 == '/':
                                do_newline = True
                            break
            elif next1 < '!' and not in_re:
                # Plain whitespace: keep one space only if both neighbors
                # are identifier-ish characters.
                conditional_4 = next2 in space_strings or next2 > '~'
                conditional_5 = previous_non_space in space_strings
                conditional_6 = previous_non_space > '~'
                if (conditional_5 or conditional_6) and (conditional_4):
                    do_space = True
            elif next1 == '/':
                # '/' may end a regex, start a comment, or be division.
                if in_re:
                    if previous != '\\':
                        in_re = False
                    write('/')
                elif next2 == '/':
                    doing_single_comment = True
                    previous_before_comment = previous_non_space
                elif next2 == '*':
                    doing_multi_comment = True
                else:
                    in_re = previous_non_space in '(,=:[?!&|'
                    write('/')
            else:
                # Ordinary character: flush any pending space/newline first.
                if do_space:
                    do_space = False
                    write(' ')
                if do_newline:
                    write('\n')
                    do_newline = False
                write(next1)
                if not in_re and next1 in "'\"":
                    in_quote = next1
                    quote_buf = []
            # Slide the lookahead window forward.
            previous = next1
            next1 = next2
            if previous >= '!':
                previous_non_space = previous
||||
|
||||
|
||||
def js_minify(js):
    """Minify a JavaScript string."""
    # Strip obvious comments first, then run the streaming minifier.
    return js_minify_keep_comments(remove_commented_lines(js)).strip()
|
||||
312
utils/web_converter_python/css_html_js_minify/minify.py
Normal file
312
utils/web_converter_python/css_html_js_minify/minify.py
Normal file
@@ -0,0 +1,312 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Created by: juancarlospaco
|
||||
# GitHub Repo: https://github.com/juancarlospaco/css-html-js-minify
|
||||
|
||||
"""CSS-HTML-JS-Minify.
|
||||
|
||||
Minifier for the Web.
|
||||
"""
|
||||
|
||||
|
||||
import atexit
|
||||
import os
|
||||
import sys
|
||||
import gzip
|
||||
import logging as log
|
||||
|
||||
from argparse import ArgumentParser
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
from hashlib import sha1
|
||||
from multiprocessing import Pool, cpu_count
|
||||
from subprocess import getoutput
|
||||
from time import sleep
|
||||
|
||||
from .css_minifier import css_minify
|
||||
from .html_minifier import html_minify
|
||||
from .js_minifier import js_minify
|
||||
|
||||
from anglerfish import (check_encoding, check_folder, make_logger,
|
||||
make_post_exec_msg, set_process_name,
|
||||
set_single_instance, walk2list, beep,
|
||||
set_terminal_title)
|
||||
|
||||
|
||||
__version__ = '2.5.0'
|
||||
__license__ = 'GPLv3+ LGPLv3+'
|
||||
__author__ = 'Juan Carlos'
|
||||
__email__ = 'juancarlospaco@gmail.com'
|
||||
__url__ = 'https://github.com/juancarlospaco/css-html-js-minify'
|
||||
__source__ = ('https://raw.githubusercontent.com/juancarlospaco/'
|
||||
'css-html-js-minify/master/css-html-js-minify.py')
|
||||
|
||||
|
||||
start_time = datetime.now()
|
||||
|
||||
|
||||
##############################################################################
|
||||
|
||||
|
||||
def process_multiple_files(file_path, watch=False, wrap=False, timestamp=False,
                           comments=False, sort=False, overwrite=False,
                           zipy=False, prefix='', add_hash=False):
    """Process multiple CSS, JS, HTML files with multiprocessing.

    file_path -- path of one file to compress (or to watch).
    watch -- when True, poll the file's mtime forever (~60s period) and
             re-compress on every change instead of compressing once.
    The remaining flags are forwarded to the per-type processors.
    """
    log.debug("Process {} is Compressing {}.".format(os.getpid(), file_path))

    def _dispatch():
        """Route the file to the right single-file processor by extension.

        Extracted so the watch and non-watch paths share one copy of the
        previously duplicated if/elif/else dispatch block.
        """
        if file_path.endswith(".css"):
            process_single_css_file(
                file_path, wrap=wrap, timestamp=timestamp,
                comments=comments, sort=sort, overwrite=overwrite,
                zipy=zipy, prefix=prefix, add_hash=add_hash)
        elif file_path.endswith(".js"):
            process_single_js_file(
                file_path, timestamp=timestamp,
                overwrite=overwrite, zipy=zipy)
        else:
            process_single_html_file(
                file_path, comments=comments,
                overwrite=overwrite, prefix=prefix, add_hash=add_hash)

    if watch:
        previous = int(os.stat(file_path).st_mtime)
        log.info("Process {} is Watching {}.".format(os.getpid(), file_path))
        while True:
            actual = int(os.stat(file_path).st_mtime)
            if previous == actual:
                sleep(60)
            else:
                previous = actual
                log.debug("Modification detected on {0}.".format(file_path))
                check_folder(os.path.dirname(file_path))
                _dispatch()
    else:
        _dispatch()
|
||||
|
||||
|
||||
def prefixer_extensioner(file_path, old, new,
                         file_content=None, prefix='', add_hash=False):
    """Take a file path and safely preppend a prefix and change extension.

    This is needed because filepath.replace('.foo', '.bar') sometimes may
    replace '/folder.foo/file.foo' into '/folder.bar/file.bar' wrong!.
    >>> prefixer_extensioner('/tmp/test.js', '.js', '.min.js')
    '/tmp/test.min.js'
    """
    log.debug("Prepending '{}' Prefix to {}.".format(new.upper(), file_path))
    extension = os.path.splitext(file_path)[1].lower().replace(old, new)
    base_name = os.path.splitext(os.path.basename(file_path))[0]
    if prefix:
        base_name = prefix + base_name
    if add_hash and file_content:  # http://stackoverflow.com/a/25568916
        base_name += "-" + sha1(file_content.encode("utf-8")).hexdigest()[:11]
        log.debug("Appending SHA1 HEX-Digest Hash to '{}'.".format(file_path))
    return os.path.join(os.path.dirname(file_path), base_name + extension)
|
||||
|
||||
|
||||
def process_single_css_file(css_file_path, wrap=False, timestamp=False,
                            comments=False, sort=False, overwrite=False,
                            zipy=False, prefix='', add_hash=False,
                            output_path=None):
    """Process a single CSS file.

    Reads *css_file_path*, minifies it with css_minify(), and writes the
    result (optionally also a gzipped copy when zipy=True). When
    output_path is None the destination name is derived via
    prefixer_extensioner(); otherwise exactly that path is written.
    Returns the path of the minified output file.
    """
    log.info("Processing CSS file: {0}.".format(css_file_path))
    with open(css_file_path, encoding="utf-8") as css_file:
        original_css = css_file.read()
    log.debug("INPUT: Reading CSS file {}.".format(css_file_path))
    minified_css = css_minify(original_css, wrap=wrap,
                              comments=comments, sort=sort)
    if timestamp:
        # Prepend an ISO timestamp comment (seconds precision, lowercase).
        taim = "/* {0} */ ".format(datetime.now().isoformat()[:-7].lower())
        minified_css = taim + minified_css
    if output_path is None:
        min_css_file_path = prefixer_extensioner(
            css_file_path, ".css", ".css" if overwrite else ".min.css",
            original_css, prefix=prefix, add_hash=add_hash)
        if zipy:
            gz_file_path = prefixer_extensioner(
                css_file_path, ".css",
                ".css.gz" if overwrite else ".min.css.gz", original_css,
                prefix=prefix, add_hash=add_hash)
            log.debug("OUTPUT: Writing ZIP CSS {}.".format(gz_file_path))
    else:
        min_css_file_path = gz_file_path = output_path
    if not zipy or output_path is None:
        # if a specific output path is requested, write only one output file
        with open(min_css_file_path, "w", encoding="utf-8") as output_file:
            output_file.write(minified_css)
    if zipy:
        with gzip.open(gz_file_path, "wt", encoding="utf-8") as output_gz:
            output_gz.write(minified_css)
    log.debug("OUTPUT: Writing CSS Minified {0}.".format(min_css_file_path))
    return min_css_file_path
|
||||
|
||||
|
||||
def process_single_html_file(html_file_path, comments=False, overwrite=False,
                             prefix='', add_hash=False, output_path=None):
    """Process a single HTML file.

    Reads *html_file_path*, minifies it with html_minify(), and writes the
    result. When output_path is None the destination name is derived via
    prefixer_extensioner(); otherwise exactly that path is written.
    Returns the path of the written output file.
    """
    log.info("Processing HTML file: {0}.".format(html_file_path))
    with open(html_file_path, encoding="utf-8") as html_file:
        minified_html = html_minify(html_file.read(), comments=comments)
    log.debug("INPUT: Reading HTML file {0}.".format(html_file_path))
    if output_path is None:
        html_file_path = prefixer_extensioner(
            html_file_path, ".html" if overwrite else ".htm", ".html",
            prefix=prefix, add_hash=add_hash)
    else:
        html_file_path = output_path
    with open(html_file_path, "w", encoding="utf-8") as output_file:
        output_file.write(minified_html)
    log.debug("OUTPUT: Writing HTML Minified {0}.".format(html_file_path))
    return html_file_path
|
||||
|
||||
|
||||
def process_single_js_file(js_file_path, timestamp=False, overwrite=False,
                           zipy=False, output_path=None):
    """Process a single JS file.

    Reads *js_file_path*, minifies it with js_minify(), and writes the
    result (optionally also a gzipped copy when zipy=True). When
    output_path is None the destination name is derived via
    prefixer_extensioner(); otherwise exactly that path is written.
    Returns the path of the minified output file.
    """
    log.info("Processing JS file: {0}.".format(js_file_path))
    with open(js_file_path, encoding="utf-8") as js_file:
        original_js = js_file.read()
    log.debug("INPUT: Reading JS file {0}.".format(js_file_path))
    minified_js = js_minify(original_js)
    if timestamp:
        # Prepend an ISO timestamp comment (seconds precision, lowercase).
        taim = "/* {} */ ".format(datetime.now().isoformat()[:-7].lower())
        minified_js = taim + minified_js
    if output_path is None:
        min_js_file_path = prefixer_extensioner(
            js_file_path, ".js", ".js" if overwrite else ".min.js",
            original_js)
        if zipy:
            gz_file_path = prefixer_extensioner(
                js_file_path, ".js", ".js.gz" if overwrite else ".min.js.gz",
                original_js)
            log.debug("OUTPUT: Writing ZIP JS {}.".format(gz_file_path))
    else:
        min_js_file_path = gz_file_path = output_path
    if not zipy or output_path is None:
        # if a specific output path is requested, write only one output file
        with open(min_js_file_path, "w", encoding="utf-8") as output_file:
            output_file.write(minified_js)
    if zipy:
        with gzip.open(gz_file_path, "wt", encoding="utf-8") as output_gz:
            output_gz.write(minified_js)
    log.debug("OUTPUT: Writing JS Minified {0}.".format(min_js_file_path))
    return min_js_file_path
|
||||
|
||||
|
||||
def make_arguments_parser():
    """Build and return a command line argument parser.

    Returns the parsed argparse Namespace (parse_args() is called here).
    """
    parser = ArgumentParser(description=__doc__, epilog="""CSS-HTML-JS-Minify:
    Takes a file or folder full path string and process all CSS/HTML/JS found.
    If argument is not file/folder will fail. Check Updates works on Python3.
    Std-In to Std-Out is deprecated since it may fail with unicode characters.
    SHA1 HEX-Digest 11 Chars Hash on Filenames is used for Server Cache.
    CSS Properties are Alpha-Sorted, to help spot cloned ones, Selectors not.
    Watch works for whole folders, with minimum of ~60 Secs between runs.""")
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument('fullpath', metavar='fullpath', type=str,
                        help='Full path to local file or folder.')
    parser.add_argument('--wrap', action='store_true',
                        help="Wrap output to ~80 chars per line, CSS only.")
    parser.add_argument('--prefix', type=str,
                        help="Prefix string to prepend on output filenames.")
    parser.add_argument('--timestamp', action='store_true',
                        help="Add a Time Stamp on all CSS/JS output files.")
    parser.add_argument('--quiet', action='store_true', help="Quiet, Silent.")
    parser.add_argument('--hash', action='store_true',
                        help="Add SHA1 HEX-Digest 11chars Hash to Filenames.")
    parser.add_argument('--zipy', action='store_true',
                        help="GZIP Minified files as '*.gz', CSS/JS only.")
    parser.add_argument('--sort', action='store_true',
                        help="Alphabetically Sort CSS Properties, CSS only.")
    parser.add_argument('--comments', action='store_true',
                        help="Keep comments, CSS/HTML only (Not Recommended)")
    parser.add_argument('--overwrite', action='store_true',
                        help="Force overwrite all in-place (Not Recommended)")
    parser.add_argument('--after', type=str,
                        help="Command to execute after run (Experimental).")
    parser.add_argument('--before', type=str,
                        help="Command to execute before run (Experimental).")
    parser.add_argument('--watch', action='store_true', help="Watch changes.")
    parser.add_argument('--multiple', action='store_true',
                        help="Allow Multiple instances (Not Recommended).")
    parser.add_argument('--beep', action='store_true',
                        help="Beep sound will be played when it ends at exit.")
    return parser.parse_args()
|
||||
|
||||
|
||||
def prepare():
    """Prepare basic setup for main loop running.

    Side effect: rebinds the module-level name `log` (previously the
    `logging` module alias) to the anglerfish logger it creates, and
    returns that logger.
    """
    global log
    log = make_logger("css-html-js-minify", emoji=True)  # Make a Logger Log.
    set_terminal_title("css-html-js-minify")
    check_encoding()  # AutoMagically Check Encodings/root
    set_process_name("css-html-js-minify")  # set Name
    set_single_instance("css-html-js-minify")  # set Single Instance
    return log
|
||||
|
||||
|
||||
def main():
    """Main Loop.

    Parses CLI arguments, then dispatches on the target path:
    a single ``.css``/``.htm(l)``/``.js`` file is processed in-place,
    a folder is walked and its files processed via a multiprocessing pool.
    Exits with status 1 when the target cannot be read.
    """
    args = make_arguments_parser()
    # Silence all output when --quiet was given; assumes `log` exposes a
    # module-level-style disable() — NOTE(review): confirm against make_logger.
    log.disable(log.CRITICAL) if args.quiet else log.debug("Max Logging ON")
    log.info(__doc__ + __version__)
    check_folder(os.path.dirname(args.fullpath))
    # Register the beep so atexit plays it when the interpreter exits.
    atexit.register(beep) if args.beep else log.debug("Beep sound at exit OFF")
    if os.path.isfile(args.fullpath) and args.fullpath.endswith(".css"):
        log.info("Target is a CSS File.")  # Work based on if argument is
        list_of_files = str(args.fullpath)  # file or folder, folder is slower.
        process_single_css_file(
            args.fullpath, wrap=args.wrap, timestamp=args.timestamp,
            comments=args.comments, sort=args.sort, overwrite=args.overwrite,
            zipy=args.zipy, prefix=args.prefix, add_hash=args.hash)
    elif os.path.isfile(args.fullpath) and args.fullpath.endswith(
            ".html" if args.overwrite else ".htm"):
        # With --overwrite the tool targets *.html, otherwise *.htm sources.
        log.info("Target is HTML File.")
        list_of_files = str(args.fullpath)
        process_single_html_file(
            args.fullpath, comments=args.comments,
            overwrite=args.overwrite, prefix=args.prefix, add_hash=args.hash)
    elif os.path.isfile(args.fullpath) and args.fullpath.endswith(".js"):
        log.info("Target is a JS File.")
        list_of_files = str(args.fullpath)
        process_single_js_file(
            args.fullpath, timestamp=args.timestamp,
            overwrite=args.overwrite, zipy=args.zipy)
    elif os.path.isdir(args.fullpath):
        log.info("Target is a Folder with CSS, HTML, JS files !.")
        log.warning("Processing a whole Folder may take some time...")
        # Recursively collect candidate files while skipping already-minified
        # outputs (second tuple is the exclusion list).
        list_of_files = walk2list(
            args.fullpath,
            (".css", ".js", ".html" if args.overwrite else ".htm"),
            (".min.css", ".min.js", ".htm" if args.overwrite else ".html"))
        log.info('Total Maximum CPUs used: ~{0} Cores.'.format(cpu_count()))
        pool = Pool(cpu_count())  # Multiprocessing Async
        pool.map_async(partial(
            process_multiple_files, watch=args.watch,
            wrap=args.wrap, timestamp=args.timestamp,
            comments=args.comments, sort=args.sort,
            overwrite=args.overwrite, zipy=args.zipy,
            prefix=args.prefix, add_hash=args.hash),
            list_of_files)
        pool.close()
        pool.join()
    else:
        log.critical("File or folder not found,or cant be read,or I/O Error.")
        sys.exit(1)
    # Optionally run the user-supplied shell command after processing.
    if args.after and getoutput:
        log.info(getoutput(str(args.after)))
    log.info('\n {0} \n Files Processed: {1}.'.format('-' * 80, list_of_files))
    log.info('Number of Files Processed: {0}.'.format(
        len(list_of_files) if isinstance(list_of_files, tuple) else 1))
    set_terminal_title()
    make_post_exec_msg(start_time)
|
||||
213
utils/web_converter_python/css_html_js_minify/variables.py
Normal file
213
utils/web_converter_python/css_html_js_minify/variables.py
Normal file
@@ -0,0 +1,213 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Created by: juancarlospaco
|
||||
# GitHub Repo: https://github.com/juancarlospaco/css-html-js-minify
|
||||
|
||||
"""Variables for CSS processing for CSS-HTML-JS-Minify."""
|
||||
|
||||
|
||||
# 'Color Name String': (R, G, B)
# Named CSS color keywords mapped to their RGB components.
# NOTE(review): basic keywords (black, white, blue, red, ...) are absent from
# this table — presumably handled elsewhere or intentionally excluded; verify
# against the CSS minifier before adding entries.
EXTENDED_NAMED_COLORS = {
    'azure': (240, 255, 255),
    'beige': (245, 245, 220),
    'bisque': (255, 228, 196),
    'blanchedalmond': (255, 235, 205),
    'brown': (165, 42, 42),
    'burlywood': (222, 184, 135),
    'chartreuse': (127, 255, 0),
    'chocolate': (210, 105, 30),
    'coral': (255, 127, 80),
    'cornsilk': (255, 248, 220),
    'crimson': (220, 20, 60),
    'cyan': (0, 255, 255),
    'darkcyan': (0, 139, 139),
    'darkgoldenrod': (184, 134, 11),
    'darkgray': (169, 169, 169),
    'darkgreen': (0, 100, 0),
    'darkgrey': (169, 169, 169),
    'darkkhaki': (189, 183, 107),
    'darkmagenta': (139, 0, 139),
    'darkolivegreen': (85, 107, 47),
    'darkorange': (255, 140, 0),
    'darkorchid': (153, 50, 204),
    'darkred': (139, 0, 0),
    'darksalmon': (233, 150, 122),
    'darkseagreen': (143, 188, 143),
    'darkslategray': (47, 79, 79),
    'darkslategrey': (47, 79, 79),
    'darkturquoise': (0, 206, 209),
    'darkviolet': (148, 0, 211),
    'deeppink': (255, 20, 147),
    'dimgray': (105, 105, 105),
    'dimgrey': (105, 105, 105),
    'firebrick': (178, 34, 34),
    'forestgreen': (34, 139, 34),
    'gainsboro': (220, 220, 220),
    'gold': (255, 215, 0),
    'goldenrod': (218, 165, 32),
    'gray': (128, 128, 128),
    'green': (0, 128, 0),
    'grey': (128, 128, 128),
    'honeydew': (240, 255, 240),
    'hotpink': (255, 105, 180),
    'indianred': (205, 92, 92),
    'indigo': (75, 0, 130),
    'ivory': (255, 255, 240),
    'khaki': (240, 230, 140),
    'lavender': (230, 230, 250),
    'lavenderblush': (255, 240, 245),
    'lawngreen': (124, 252, 0),
    'lemonchiffon': (255, 250, 205),
    'lightcoral': (240, 128, 128),
    'lightcyan': (224, 255, 255),
    'lightgray': (211, 211, 211),
    'lightgreen': (144, 238, 144),
    'lightgrey': (211, 211, 211),
    'lightpink': (255, 182, 193),
    'lightsalmon': (255, 160, 122),
    'lightseagreen': (32, 178, 170),
    'lightslategray': (119, 136, 153),
    'lightslategrey': (119, 136, 153),
    'lime': (0, 255, 0),
    'limegreen': (50, 205, 50),
    'linen': (250, 240, 230),
    'magenta': (255, 0, 255),
    'maroon': (128, 0, 0),
    'mediumorchid': (186, 85, 211),
    'mediumpurple': (147, 112, 219),
    'mediumseagreen': (60, 179, 113),
    'mediumspringgreen': (0, 250, 154),
    'mediumturquoise': (72, 209, 204),
    'mediumvioletred': (199, 21, 133),
    'mintcream': (245, 255, 250),
    'mistyrose': (255, 228, 225),
    'moccasin': (255, 228, 181),
    'navy': (0, 0, 128),
    'oldlace': (253, 245, 230),
    'olive': (128, 128, 0),
    'olivedrab': (107, 142, 35),
    'orange': (255, 165, 0),
    'orangered': (255, 69, 0),
    'orchid': (218, 112, 214),
    'palegoldenrod': (238, 232, 170),
    'palegreen': (152, 251, 152),
    'paleturquoise': (175, 238, 238),
    'palevioletred': (219, 112, 147),
    'papayawhip': (255, 239, 213),
    'peachpuff': (255, 218, 185),
    'peru': (205, 133, 63),
    'pink': (255, 192, 203),
    'plum': (221, 160, 221),
    'purple': (128, 0, 128),
    'rosybrown': (188, 143, 143),
    'saddlebrown': (139, 69, 19),
    'salmon': (250, 128, 114),
    'sandybrown': (244, 164, 96),
    'seagreen': (46, 139, 87),
    'seashell': (255, 245, 238),
    'sienna': (160, 82, 45),
    'silver': (192, 192, 192),
    'slategray': (112, 128, 144),
    'slategrey': (112, 128, 144),
    'snow': (255, 250, 250),
    'springgreen': (0, 255, 127),
    'teal': (0, 128, 128),
    'thistle': (216, 191, 216),
    'tomato': (255, 99, 71),
    'turquoise': (64, 224, 208),
    'violet': (238, 130, 238),
    'wheat': (245, 222, 179)
}
|
||||
|
||||
|
||||
# Do Not compact this string, new lines are used to Group up stuff.
# Whitespace-separated list of known CSS property names, grouped by first
# letter with blank lines between groups. NOTE(review): presumably consumed
# by the CSS property sorter (--sort) — confirm in minify.py before editing;
# the exact content (including blank lines) is significant at runtime.
CSS_PROPS_TEXT = '''

alignment-adjust alignment-baseline animation animation-delay
animation-direction animation-duration animation-iteration-count
animation-name animation-play-state animation-timing-function appearance
azimuth

backface-visibility background background-blend-mode background-attachment
background-clip background-color background-image background-origin
background-position background-position-block background-position-inline
background-position-x background-position-y background-repeat background-size
baseline-shift bikeshedding bookmark-label bookmark-level bookmark-state
bookmark-target border border-bottom border-bottom-color
border-bottom-left-radius border-bottom-parts border-bottom-right-radius
border-bottom-style border-bottom-width border-clip border-clip-top
border-clip-right border-clip-bottom border-clip-left border-collapse
border-color border-corner-shape border-image border-image-outset
border-image-repeat border-image-slice border-image-source border-image-width
border-left border-left-color border-left-style border-left-parts
border-left-width border-limit border-parts border-radius border-right
border-right-color border-right-style border-right-width border-right-parts
border-spacing border-style border-top border-top-color border-top-left-radius
border-top-parts border-top-right-radius border-top-style border-top-width
border-width bottom box-decoration-break box-shadow box-sizing

caption-side clear clip color column-count column-fill column-gap column-rule
column-rule-color column-rule-style column-rule-width column-span column-width
columns content counter-increment counter-reset corners corner-shape
cue cue-after cue-before cursor

direction display drop-initial-after-adjust drop-initial-after-align
drop-initial-before-adjust drop-initial-before-align drop-initial-size
drop-initial-value

elevation empty-cells

flex flex-basis flex-direction flex-flow flex-grow flex-shrink flex-wrap fit
fit-position float font font-family font-size font-size-adjust font-stretch
font-style font-variant font-weight

grid-columns grid-rows

justify-content

hanging-punctuation height hyphenate-character hyphenate-resource hyphens

icon image-orientation image-resolution inline-box-align

left letter-spacing line-height line-stacking line-stacking-ruby
line-stacking-shift line-stacking-strategy linear-gradient list-style
list-style-image list-style-position list-style-type

margin margin-bottom margin-left margin-right margin-top marquee-direction
marquee-loop marquee-speed marquee-style max-height max-width min-height
min-width

nav-index

opacity orphans outline outline-color outline-offset outline-style
outline-width overflow overflow-style overflow-x overflow-y

padding padding-bottom padding-left padding-right padding-top page
page-break-after page-break-before page-break-inside pause pause-after
pause-before perspective perspective-origin pitch pitch-range play-during
position presentation-level

quotes

resize rest rest-after rest-before richness right rotation rotation-point
ruby-align ruby-overhang ruby-position ruby-span

size speak speak-header speak-numeral speak-punctuation speech-rate src
stress string-set

table-layout target target-name target-new target-position text-align
text-align-last text-decoration text-emphasis text-indent text-justify
text-outline text-shadow text-transform text-wrap top transform
transform-origin transition transition-delay transition-duration
transition-property transition-timing-function

unicode-bidi unicode-range

vertical-align visibility voice-balance voice-duration voice-family
voice-pitch voice-range voice-rate voice-stress voice-volume volume

white-space widows width word-break word-spacing word-wrap

z-index

'''
|
||||
221
utils/web_converter_python/webConverter.py
Normal file
221
utils/web_converter_python/webConverter.py
Normal file
@@ -0,0 +1,221 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Created by: xdavidhu
|
||||
|
||||
import os
import gzip
import argparse
import binascii
from pathlib import Path, PurePath
try:
    from css_html_js_minify.minify import process_single_html_file, process_single_js_file, process_single_css_file
except ModuleNotFoundError:
    print("\n[!] Requirements are not satisfied. Please install the 'anglerfish' package by running 'sudo python3 -m pip install anglerfish'.\n")
    # Fix: exit() returns status 0; a missing dependency is a failure,
    # so terminate with a nonzero exit code instead.
    raise SystemExit(1)
|
||||
|
||||
# --- Command-line handling and repository/output directory layout ----------
parser = argparse.ArgumentParser(usage="webConverter.py --repopath [path-to-repo]")
parser.add_argument("--repopath", type=str,
                    help='Path to the repo, if not set make sure to run the script from [repo]/utils/web_converter_python/ directory')

print("\nwebConverter for the deauther2.0 by @xdavidhu\n")

args = parser.parse_args()
if args.repopath is not None:  # idiomatic None test (was `!= None`)
    parent = args.repopath
    print("[+] Using manual path '" + args.repopath + "'\n")
else:
    # Default: assume we are running from [repo]/utils/web_converter_python/.
    p = Path.cwd()
    parent = p.parent.parent
# Input and output locations inside the repository.
license_file_path = str(os.path.join(str(parent), "LICENSE"))
q = PurePath('esp8266_deauther')
arduino_file_path = str(os.path.join(str(parent / q), "webfiles.h"))
datadir = parent / q
q = PurePath('web_interface')
dir = parent / q  # NOTE(review): shadows builtin `dir`; kept — used below.
q = PurePath('data')
datadir = datadir / q
# makedirs(exist_ok=True) is race-free, unlike the old exists()+mkdir() pair.
os.makedirs(str(datadir), exist_ok=True)
q = PurePath('web')
compressed = datadir / q
os.makedirs(str(compressed), exist_ok=True)
|
||||
|
||||
# Buckets for every source kind found under the web_interface folder,
# plus the two C-source fragments accumulated while embedding files.
html_files = []
css_files = []
js_files = []
lang_files = []
progmem_definitions = ""
copy_files_function = ""

filelist = Path(dir).glob('**/*')
bucket_by_suffix = {
    ".html": html_files,
    ".css": css_files,
    ".js": js_files,
    ".lang": lang_files,
}
for entry in filelist:
    if not entry.is_file():
        continue
    # Skip anything already living inside a "compressed" output folder.
    if entry.parts[-2] == "compressed" or entry.parts[-3] == "compressed":
        continue
    bucket = bucket_by_suffix.get(entry.suffix)
    if bucket is not None:
        bucket.append(entry)
|
||||
|
||||
# Minify, gzip and embed every HTML page as a PROGMEM byte array.
for page in html_files:
    src = str(page)
    name = os.path.basename(src)
    minified = str(os.path.join(str(compressed), name))
    print("[+] Minifying " + name + "...")
    process_single_html_file(src, output_path=minified)
    print("[+] Compressing " + name + "...")
    with open(minified, encoding='UTF-8') as handle:
        text = handle.read()
    os.remove(minified)  # Only the gzipped variant is kept.
    with gzip.GzipFile(minified + ".gz", mode='w') as archive:
        archive.write(text.encode("UTF-8"))
    with open(minified + ".gz", 'rb') as handle:
        payload = handle.read()
    array_name = name.replace(".", "")
    # Render the gzip bytes as "0x1f, 0x8b, ..." for the C array literal.
    byte_list = ", ".join("0x%02x" % octet for octet in payload)
    progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + byte_list + "};\n"
    copy_files_function += ' if(!SPIFFS.exists("/web/' + name + '.gz")) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), "/web/' + name + '.gz");\n'
|
||||
|
||||
# Minify, gzip and embed every CSS stylesheet as a PROGMEM byte array.
for sheet in css_files:
    src = str(sheet)
    name = os.path.basename(src)
    minified = str(os.path.join(str(compressed), name))
    print("[+] Minifying " + name + "...")
    process_single_css_file(src, output_path=minified)
    print("[+] Compressing " + name + "...")
    with open(minified, encoding='UTF-8') as handle:
        text = handle.read()
    os.remove(minified)  # Only the gzipped variant is kept.
    with gzip.GzipFile(minified + ".gz", mode='w') as archive:
        archive.write(text.encode("UTF-8"))
    with open(minified + ".gz", 'rb') as handle:
        payload = handle.read()
    array_name = name.replace(".", "")
    byte_list = ", ".join("0x%02x" % octet for octet in payload)
    progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + byte_list + "};\n"
    copy_files_function += ' if(!SPIFFS.exists("/web/' + name + '.gz")) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), "/web/' + name + '.gz");\n'
|
||||
|
||||
# Gzip and embed every JS file under a js/ subfolder.
# JS minification was disabled in the original (the minify call was
# commented out), so the raw source is compressed as-is.
for script in js_files:
    subdir = compressed / PurePath('js')
    if not os.path.exists(str(subdir)):
        os.mkdir(str(subdir))
    src = str(script)
    name = os.path.basename(src)
    target = str(os.path.join(str(subdir), name))
    print("[+] Compressing " + name + "...")
    with open(src, encoding='UTF-8') as handle:
        text = handle.read()
    with gzip.GzipFile(target + ".gz", mode='w') as archive:
        archive.write(text.encode("UTF-8"))
    with open(target + ".gz", 'rb') as handle:
        payload = handle.read()
    array_name = name.replace(".", "")
    byte_list = ", ".join("0x%02x" % octet for octet in payload)
    progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + byte_list + "};\n"
    copy_files_function += ' if(!SPIFFS.exists("/web/js/' + name + '.gz")) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), "/web/js/' + name + '.gz");\n'
|
||||
|
||||
# Gzip and embed every language file under a lang/ subfolder (no minify).
for lang in lang_files:
    subdir = compressed / PurePath('lang')
    if not os.path.exists(str(subdir)):
        os.mkdir(str(subdir))
    src = str(lang)
    name = os.path.basename(src)
    target = str(os.path.join(str(subdir), name))
    print("[+] Compressing " + name + "...")
    with open(src, encoding='UTF-8') as handle:
        text = handle.read()
    with gzip.GzipFile(target + ".gz", mode='w') as archive:
        archive.write(text.encode("UTF-8"))
    with open(target + ".gz", 'rb') as handle:
        payload = handle.read()
    array_name = name.replace(".", "")
    byte_list = ", ".join("0x%02x" % octet for octet in payload)
    progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + byte_list + "};\n"
    copy_files_function += ' if(!SPIFFS.exists("/web/lang/' + name + '.gz")) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), "/web/lang/' + name + '.gz");\n'
|
||||
|
||||
# Gzip and embed the repository LICENSE file as well.
base_file = os.path.basename(license_file_path)
new_file = str(os.path.join(str(compressed), str("LICENSE")))
print("[+] Compressing " + base_file + "...")
with open(license_file_path, encoding='UTF-8') as handle:
    text = handle.read()
with gzip.GzipFile(new_file + ".gz", mode='w') as archive:
    archive.write(text.encode("UTF-8"))
with open(new_file + ".gz", 'rb') as handle:
    payload = handle.read()
array_name = base_file.replace(".", "")
byte_list = ", ".join("0x%02x" % octet for octet in payload)
progmem_definitions += "const char " + array_name + "[] PROGMEM = {" + byte_list + "};\n"
copy_files_function += ' if(!SPIFFS.exists("/web/' + base_file + '.gz")) progmemToSpiffs(' + array_name + ', sizeof(' + array_name + '), "/web/' + base_file + '.gz");\n'
|
||||
|
||||
print("[+] Saving everything into webfiles.h...")
# Assemble the generated C header in one string, then write it once.
# Note: the trailing space after USE_PROGMEM_WEB_FILES and the missing
# final newline match the previously generated output exactly.
header_text = (
    "#ifndef webfiles_h\n"
    "#define webfiles_h\n"
    "\n"
    "// comment that out if you want to save program memory and know how to upload the web files to the SPIFFS manually\n"
    "#define USE_PROGMEM_WEB_FILES \n"
    "\n"
    "#ifdef USE_PROGMEM_WEB_FILES\n"
    + progmem_definitions +
    "#endif\n"
    "\n"
    "void copyWebFiles(){\n"
    "#ifdef USE_PROGMEM_WEB_FILES\n"
    + copy_files_function +
    "#endif\n"
    "}\n"
    "\n"
    "#endif"
)
with open(arduino_file_path, 'w') as out:
    out.write(header_text)

print("\n[+] Done, happy uploading :)")
|
||||
Reference in New Issue
Block a user