Compare commits

..

No commits in common. "master" and "v2.3.0" have entirely different histories.

84 changed files with 2359 additions and 2487 deletions

View File

@ -1,91 +0,0 @@
name: Build and test
on: [ push ]
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-latest, macos-latest, windows-latest ]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: '^1.24' # The Go version to download (if necessary) and use.
- run: go test -race -coverprofile coverage.txt -coverpkg ./... -covermode atomic ./...
- uses: codecov/codecov-action@v4
with:
files: coverage.txt
token: ${{ secrets.CODECOV_TOKEN }}
compat-test:
runs-on: ubuntu-latest
strategy:
matrix:
encryption-method: [ plain, chacha20-poly1305 ]
num-conn: [ 0, 1, 4 ]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: '^1.24'
- name: Build Cloak
run: make
- name: Create configs
run: |
mkdir config
cat << EOF > config/ckclient.json
{
"Transport": "direct",
"ProxyMethod": "iperf",
"EncryptionMethod": "${{ matrix.encryption-method }}",
"UID": "Q4GAXHVgnDLXsdTpw6bmoQ==",
"PublicKey": "4dae/bF43FKGq+QbCc5P/E/MPM5qQeGIArjmJEHiZxc=",
"ServerName": "cloudflare.com",
"BrowserSig": "firefox",
"NumConn": ${{ matrix.num-conn }}
}
EOF
cat << EOF > config/ckserver.json
{
"ProxyBook": {
"iperf": [
"tcp",
"127.0.0.1:5201"
]
},
"BindAddr": [
":8443"
],
"BypassUID": [
"Q4GAXHVgnDLXsdTpw6bmoQ=="
],
"RedirAddr": "cloudflare.com",
"PrivateKey": "AAaskZJRPIAbiuaRLHsvZPvE6gzOeSjg+ZRg1ENau0Y="
}
EOF
- name: Start iperf3 server
run: docker run -d --name iperf-server --network host ajoergensen/iperf3:latest --server
- name: Test new client against old server
run: |
docker run -d --name old-cloak-server --network host -v $PWD/config:/go/Cloak/config cbeuw/cloak:latest build/ck-server -c config/ckserver.json --verbosity debug
build/ck-client -c config/ckclient.json -s 127.0.0.1 -p 8443 --verbosity debug | tee new-cloak-client.log &
docker run --network host ajoergensen/iperf3:latest --client 127.0.0.1 -p 1984
docker stop old-cloak-server
- name: Test old client against new server
run: |
build/ck-server -c config/ckserver.json --verbosity debug | tee new-cloak-server.log &
docker run -d --name old-cloak-client --network host -v $PWD/config:/go/Cloak/config cbeuw/cloak:latest build/ck-client -c config/ckclient.json -s 127.0.0.1 -p 8443 --verbosity debug
docker run --network host ajoergensen/iperf3:latest --client 127.0.0.1 -p 1984
docker stop old-cloak-client
- name: Dump docker logs
if: always()
run: |
docker container logs iperf-server > iperf-server.log
docker container logs old-cloak-server > old-cloak-server.log
docker container logs old-cloak-client > old-cloak-client.log
- name: Upload logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.encryption-method }}-${{ matrix.num-conn }}-conn-logs
path: ./*.log

View File

@ -1,50 +0,0 @@
on:
push:
tags:
- 'v*'
name: Create Release
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build
run: |
export PATH=${PATH}:`go env GOPATH`/bin
v=${GITHUB_REF#refs/*/} ./release.sh
- name: Release
uses: softprops/action-gh-release@v1
with:
files: release/*
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
build-docker:
runs-on: ubuntu-latest
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
cbeuw/cloak
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v6
with:
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

4
.gitignore vendored
View File

@ -1,6 +1,4 @@
corpus/ corpus/
suppressions/ suppressions/
crashers/ crashers/
*.zip *.zip
.idea/
build/

10
.travis.yml Normal file
View File

@ -0,0 +1,10 @@
language: go
go:
- "1.14"
script:
- go test -race -coverprofile=coverage.txt -coverpkg=./... -covermode=atomic ./...
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -1,5 +0,0 @@
FROM golang:latest
RUN git clone https://github.com/cbeuw/Cloak.git
WORKDIR Cloak
RUN make

199
README.md
View File

@ -1,75 +1,52 @@
[![Build Status](https://github.com/cbeuw/Cloak/workflows/Build%20and%20test/badge.svg)](https://github.com/cbeuw/Cloak/actions) [![Build Status](https://travis-ci.org/cbeuw/Cloak.svg?branch=master)](https://travis-ci.org/cbeuw/Cloak)
[![codecov](https://codecov.io/gh/cbeuw/Cloak/branch/master/graph/badge.svg)](https://codecov.io/gh/cbeuw/Cloak) [![codecov](https://codecov.io/gh/cbeuw/Cloak/branch/master/graph/badge.svg)](https://codecov.io/gh/cbeuw/Cloak)
[![Go Report Card](https://goreportcard.com/badge/github.com/cbeuw/Cloak)](https://goreportcard.com/report/github.com/cbeuw/Cloak) [![Go Report Card](https://goreportcard.com/badge/github.com/cbeuw/Cloak)](https://goreportcard.com/report/github.com/cbeuw/Cloak)
[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=SAUYKGSREP8GL&source=url) [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=SAUYKGSREP8GL&source=url)
<p align="center">
<img src="https://user-images.githubusercontent.com/7034308/96387206-3e214100-1198-11eb-8917-689d7c56e0cd.png" />
<img src="https://user-images.githubusercontent.com/7034308/155593583-f22bcfe2-ac22-4afb-9288-1a0e8a791a0d.svg" />
</p>
<p align="center"> ![logo](https://user-images.githubusercontent.com/7034308/96387206-3e214100-1198-11eb-8917-689d7c56e0cd.png)
<img src="https://user-images.githubusercontent.com/7034308/155629720-54dd8758-ec98-4fed-b603-623f0ad83b6c.svg" />
</p>
Cloak is a [pluggable transport](https://datatracker.ietf.org/meeting/103/materials/slides-103-pearg-pt-slides-01) that enhances ![diagram](https://user-images.githubusercontent.com/7034308/65385852-7eab5280-dd2b-11e9-8887-db449b250e2a.png)
traditional proxy tools like OpenVPN to evade [sophisticated censorship](https://en.wikipedia.org/wiki/Deep_packet_inspection) and [data discrimination](https://en.wikipedia.org/wiki/Net_bias).
Cloak is not a standalone proxy program. Rather, it works by masquerading proxied traffic as normal web browsing Cloak is a [pluggable transport](https://www.ietf.org/proceedings/103/slides/slides-103-pearg-pt-slides-01) that works alongside traditional proxy tools like OpenVPN to evade deep-packet-inspection based censorship.
activities. In contrast to traditional tools which have very prominent traffic fingerprints and can be blocked by simple filtering rules,
it's very difficult to precisely target Cloak with little false positives. This increases the collateral damage to censorship actions as
attempts to block Cloak could also damage services the censor state relies on.
To any third party observer, a host running Cloak server is indistinguishable from an innocent web server. Both while Cloak is not a standalone proxy program. Rather, it works by masquerading proxy tool's traffic as normal web browsing traffic. In contrast to traditional tools which have very prominent traffic "fingerprints", it's very difficult to precisely target Cloak with little false positives. This increases the collateral damage to censorship actions as attempts to block Cloak could also damage services the censor state relies on.
passively observing traffic flow to and from the server, as well as while actively probing the behaviours of a Cloak
server. This is achieved through the use of a series
of [cryptographic steganography techniques](https://github.com/cbeuw/Cloak/wiki/Steganography-and-encryption).
Cloak can be used in conjunction with any proxy program that tunnels traffic through TCP or To a third party observer, a host running Cloak server is indistinguishable from an innocent web server. Both while passively observing traffic flow to and from the server, as well as while actively probing the behaviours of a Cloak server. This is achieved through the use of a series of [cryptographic steganography techniques](https://github.com/cbeuw/Cloak/wiki/Steganography-and-encryption).
UDP, such as Shadowsocks, OpenVPN and Tor. Multiple proxy servers can be running on the same server host and
Cloak server will act as a reverse proxy, bridging clients with their desired proxy end.
Cloak multiplexes traffic through multiple underlying TCP connections which reduces head-of-line blocking and eliminates Since Cloak is transparent, it can be used in conjunction with any proxy software that tunnels traffic through TCP or UDP, such as Shadowsocks, OpenVPN and Tor. Multiple proxy servers can be running on the same server host machine and Cloak server will act as a reverse proxy, bridging clients with their desired proxy end.
TCP handshake overhead. This also makes the traffic pattern more similar to real websites.
Cloak provides multi-user support, allowing multiple clients to connect to the proxy server on the same port (443 by Cloak multiplexes traffic through multiple underlying TCP connections which reduces head-of-line blocking and eliminates TCP handshake overhead. This also makes the traffic pattern more similar to real websites.
default). It also provides traffic management features such as usage credit and bandwidth control. This allows a proxy
server to serve multiple users even if the underlying proxy software wasn't designed for multiple users
Cloak also supports tunneling through an intermediary CDN server such as Amazon Cloudfront. Such services are so widely used, Cloak provides multi-user support, allowing multiple clients to connect to the proxy server on the same port (443 by default). It also provides traffic management features such as usage credit and bandwidth control. This allows a proxy server to serve multiple users even if the underlying proxy software wasn't designed for multiple users
attempts to disrupt traffic to them can lead to very high collateral damage for the censor.
## Quick Start Cloak has two modes of [_Transport_](https://github.com/cbeuw/Cloak/wiki/CDN-mode): `direct` and `CDN`. Clients can either connect to the host running Cloak server directly, or it can instead connect to a CDN edge server, which may be used by many other websites as well, thus further increases the collateral damage to censorship.
To quickly deploy Cloak with Shadowsocks on a server, you can run
this [script](https://github.com/HirbodBehnam/Shadowsocks-Cloak-Installer/blob/master/Cloak2-Installer.sh) written by
@HirbodBehnam
Table of Contents Table of Contents
================= =================
* [Quick Start](#quick-start) * [Quick Start](#quick-start)
* [Build](#build) * [Build](#build)
* [Configuration](#configuration) * [Configuration](#configuration)
* [Server](#server) * [Server](#server)
* [Client](#client) * [Client](#client)
* [Setup](#setup) * [Setup](#setup)
* [Server](#server-1) * [For the administrator of the server](#for-the-administrator-of-the-server)
* [To add users](#to-add-users) * [To add users](#to-add-users)
* [Unrestricted users](#unrestricted-users) * [Unrestricted users](#unrestricted-users)
* [Users subject to bandwidth and credit controls](#users-subject-to-bandwidth-and-credit-controls) * [Users subject to bandwidth and credit controls](#users-subject-to-bandwidth-and-credit-controls)
* [Client](#client-1) * [Instructions for clients](#instructions-for-clients)
* [Support me](#support-me) * [Support me](#support-me)
## Build ## Quick Start
To quickly deploy Cloak with Shadowsocks on a server, you can run this [script](https://github.com/HirbodBehnam/Shadowsocks-Cloak-Installer/blob/master/Cloak2-Installer.sh) written by @HirbodBehnam
## Build
```bash ```bash
git clone https://github.com/cbeuw/Cloak git clone https://github.com/cbeuw/Cloak
cd Cloak cd Cloak
go get ./... go get -u ./...
make make
``` ```
Built binaries will be in `build` folder. Built binaries will be in `build` folder.
## Configuration ## Configuration
@ -77,163 +54,89 @@ Built binaries will be in `build` folder.
Examples of configuration files can be found under `example_config` folder. Examples of configuration files can be found under `example_config` folder.
### Server ### Server
`RedirAddr` is the redirection address when the incoming traffic is not from a Cloak client. Ideally it should be set to a major website allowed by the censor (e.g. `www.bing.com`)
`RedirAddr` is the redirection address when the incoming traffic is not from a Cloak client. Ideally it should be set to `BindAddr` is a list of addresses Cloak will bind and listen to (e.g. `[":443",":80"]` to listen to port 443 and 80 on all interfaces)
a major website allowed by the censor (e.g. `www.bing.com`)
`BindAddr` is a list of addresses Cloak will bind and listen to (e.g. `[":443",":80"]` to listen to port 443 and 80 on `ProxyBook` is an object whose key is the name of the ProxyMethod used on the client-side (case-sensitive). Its value is an array whose first element is the protocol, and the second element is an `IP:PORT` string of the upstream proxy server that Cloak will forward the traffic to.
all interfaces)
`ProxyBook` is an object whose key is the name of the ProxyMethod used on the client-side (case-sensitive). Its value is
an array whose first element is the protocol, and the second element is an `IP:PORT` string of the upstream proxy server
that Cloak will forward the traffic to.
Example: Example:
```json ```json
{ {
"ProxyBook": { "ProxyBook": {
"shadowsocks": [ "shadowsocks": [ "tcp", "localhost:51443" ],
"tcp", "openvpn": [ "tcp", "localhost:12345" ]
"localhost:51443" }
],
"openvpn": [
"tcp",
"localhost:12345"
]
}
} }
``` ```
`PrivateKey` is the static curve25519 Diffie-Hellman private key encoded in base64. `PrivateKey` is the static curve25519 Diffie-Hellman private key encoded in base64.
`AdminUID` is the UID of the admin user in base64.
`BypassUID` is a list of UIDs that are authorised without any bandwidth or credit limit restrictions `BypassUID` is a list of UIDs that are authorised without any bandwidth or credit limit restrictions
`AdminUID` is the UID of the admin user in base64. You can leave this empty if you only ever add users to `BypassUID`. `DatabasePath` is the path to `userinfo.db`. If `userinfo.db` doesn't exist in this directory, Cloak will create one automatically. **If Cloak is started as a Shadowsocks plugin and Shadowsocks is started with its working directory as / (e.g. starting ss-server with systemctl), you need to set this field as an absolute path to a desired folder. If you leave it as default then Cloak will attempt to create userinfo.db under /, which it doesn't have the permission to do so and will raise an error. See Issue #13.**
`DatabasePath` is the path to `userinfo.db`, which is used to store user usage information and restrictions. Cloak will `KeepAlive` is the number of seconds to tell the OS to wait after no activity before sending TCP KeepAlive probes to the upstream proxy server. Zero or negative value disables it. Default is 0 (disabled).
create the file automatically if it doesn't exist. You can leave this empty if you only ever add users to `BypassUID`.
This field also has no effect if `AdminUID` isn't a valid UID or is empty.
`KeepAlive` is the number of seconds to tell the OS to wait after no activity before sending TCP KeepAlive probes to the `StreamTimeout` is the number of seconds of no data sent after which the incoming Cloak client connection will be terminated. Default is 300 seconds.
upstream proxy server. Zero or negative value disables it. Default is 0 (disabled).
### Client ### Client
`UID` is your UID in base64. `UID` is your UID in base64.
`Transport` can be either `direct` or `CDN`. If the server host wishes you to connect to it directly, use `direct`. If `Transport` can be either `direct` or `CDN`. If the server host wishes you to connect to it directly, use `direct`. If instead a CDN is used, use `CDN`.
instead a CDN is used, use `CDN`.
`PublicKey` is the static curve25519 public key in base64, given by the server admin. `PublicKey` is the static curve25519 public key in base64, given by the server admin.
`ProxyMethod` is the name of the proxy method you are using. This must match one of the entries in the `ProxyMethod` is the name of the proxy method you are using. This must match one of the entries in the server's `ProxyBook` exactly.
server's `ProxyBook` exactly.
`EncryptionMethod` is the name of the encryption algorithm you want Cloak to use. Options are `plain`, `aes-256-gcm` ( `EncryptionMethod` is the name of the encryption algorithm you want Cloak to use. Note: Cloak isn't intended to provide transport security. The point of encryption is to hide fingerprints of proxy protocols and render the payload statistically random-like. If the proxy protocol is already fingerprint-less, which is the case for Shadowsocks, this field can be left as `plain`. Options are `plain`, `aes-gcm` and `chacha20-poly1305`.
synonymous to `aes-gcm`), `aes-128-gcm`, and `chacha20-poly1305`. Note: Cloak isn't intended to provide transport
security. The point of encryption is to hide fingerprints of proxy protocols and render the payload statistically
random-like. **You may only leave it as `plain` if you are certain that your underlying proxy tool already provides BOTH
encryption and authentication (via AEAD or similar techniques).**
`ServerName` is the domain you want to make your ISP or firewall _think_ you are visiting. Ideally it should `ServerName` is the domain you want to make your ISP or firewall _think_ you are visiting. Ideally it should match `RedirAddr` in the server's configuration, a major site the censor allows, but it doesn't have to.
match `RedirAddr` in the server's configuration, a major site the censor allows, but it doesn't have to. Use `random` to randomize the server name for every connection made.
`AlternativeNames` is an array used alongside `ServerName` to shuffle between different ServerNames for every new `NumConn` is the amount of underlying TCP connections you want to use. The default of 4 should be appropriate for most people. Setting it too high will hinder the performance. Setting it to 0 will disable connection multiplexing and each TCP connection will spawn a separate short lived session that will be closed after it is terminated. This makes it behave like GoQuiet. This may be useful for people with unstable connections.
connection. **This may conflict with `CDN` Transport mode** if the CDN provider prohibits domain fronting and rejects
the alternative domains.
Example: `BrowserSig` is the browser you want to **appear** to be using. It's not relevant to the browser you are actually using. Currently, `chrome` and `firefox` are supported.
```json `KeepAlive` is the number of seconds to tell the OS to wait after no activity before sending TCP KeepAlive probes to the Cloak server. Zero or negative value disables it. Default is 0 (disabled). Warning: Enabling it might make your server more detectable as a proxy, but it will make the Cloak client detect internet interruption more quickly.
{
"ServerName": "bing.com",
"AlternativeNames": ["cloudflare.com", "github.com"]
}
```
`CDNOriginHost` is the domain name of the _origin_ server (i.e. the server running Cloak) under `CDN` mode. This only `StreamTimeout` is the number of seconds of no data received after which the incoming proxy connection will be terminated. Default is 300 seconds.
has effect when `Transport` is set to `CDN`. If unset, it will default to the remote hostname supplied via the
commandline argument (in standalone mode), or by Shadowsocks (in plugin mode). After a TLS session is established with
the CDN server, this domain name will be used in the `Host` header of the HTTP request to ask the CDN server to
establish a WebSocket connection with this host.
`CDNWsUrlPath` is the url path used to build websocket request sent under `CDN` mode, and also only has effect
when `Transport` is set to `CDN`. If unset, it will default to "/". This option is used to build the first line of the
HTTP request after a TLS session is established. It's mainly for a Cloak server behind a reverse proxy, where only
requests under specific url path are forwarded.
`NumConn` is the amount of underlying TCP connections you want to use. The default of 4 should be appropriate for most
people. Setting it too high will hinder the performance. Setting it to 0 will disable connection multiplexing and each
TCP connection will spawn a separate short-lived session that will be closed after it is terminated. This makes it
behave like GoQuiet. This may be useful for people with unstable connections.
`BrowserSig` is the browser you want to **appear** to be using. It's not relevant to the browser you are actually using.
Currently, `chrome`, `firefox` and `safari` are supported.
`KeepAlive` is the number of seconds to tell the OS to wait after no activity before sending TCP KeepAlive probes to the
Cloak server. Zero or negative value disables it. Default is 0 (disabled). Warning: Enabling it might make your server
more detectable as a proxy, but it will make the Cloak client detect internet interruption more quickly.
`StreamTimeout` is the number of seconds Cloak waits for an incoming connection from a proxy program to send any `StreamTimeout` is the number of seconds of no data received after which the incoming proxy connection will be terminated. Default is 300 seconds.
data, after which the connection will be closed by Cloak. Cloak will not enforce any timeout on TCP connections after it
is established.
## Setup ## Setup
### For the administrator of the server
### Server
0. Install at least one underlying proxy server (e.g. OpenVPN, Shadowsocks). 0. Install at least one underlying proxy server (e.g. OpenVPN, Shadowsocks).
1. Download [the latest release](https://github.com/cbeuw/Cloak/releases) or clone and build this repo. 1. Download [the latest release](https://github.com/cbeuw/Cloak/releases) or clone and build this repo.
2. Run `ck-server -key`. The **public** key should be given to users, the **private** key should be kept secret. 2. Run `ck-server -k`. The base64 string before the comma is the **public** key to be given to users, the one after the comma is the **private** key to be kept secret.
3. (Skip if you only want to add unrestricted users) Run `ck-server -uid`. The new UID will be used as `AdminUID`. 3. Run `ck-server -u`. This will be used as the `AdminUID`.
4. Copy example_config/ckserver.json into a desired location. Change `PrivateKey` to the private key you just obtained; 4. Copy example_config/ckserver.json into a desired location. Change `PrivateKey` to the private key you just obtained; change `AdminUID` to the UID you just obtained.
change `AdminUID` to the UID you just obtained. 5. Configure your underlying proxy server so that they all listen on localhost. Edit `ProxyBook` in the configuration file accordingly
5. Configure your underlying proxy server so that they all listen on localhost. Edit `ProxyBook` in the configuration 6. [Configure the proxy program.](https://github.com/cbeuw/Cloak/wiki/Underlying-proxy-configuration-guides) Run `sudo ck-server -c <path to ckserver.json>`. ck-server needs root privilege because it binds to a low numbered port (443). Alternatively you can follow https://superuser.com/a/892391 to avoid granting ck-server root privilege unnecessarily.
file accordingly
6. [Configure the proxy program.](https://github.com/cbeuw/Cloak/wiki/Underlying-proxy-configuration-guides)
Run `sudo ck-server -c <path to ckserver.json>`. ck-server needs root privilege because it binds to a low numbered
port (443). Alternatively you can follow https://superuser.com/a/892391 to avoid granting ck-server root privilege
unnecessarily.
#### To add users #### To add users
##### Unrestricted users ##### Unrestricted users
Run `ck-server -u` and add the UID into the `BypassUID` field in `ckserver.json`
Run `ck-server -uid` and add the UID into the `BypassUID` field in `ckserver.json`
##### Users subject to bandwidth and credit controls ##### Users subject to bandwidth and credit controls
1. On your client, run `ck-client -s <IP of the server> -l <A local port> -a <AdminUID> -c <path-to-ckclient.json>` to enter admin mode
0. First make sure you have `AdminUID` generated and set in `ckserver.json`, along with a path to `userinfo.db` 2. Visit https://cbeuw.github.io/Cloak-panel (Note: this is a pure-js static site, there is no backend and all data entered into this site are processed between your browser and the Cloak API endpoint you specified. Alternatively you can download the repo at https://github.com/cbeuw/Cloak-panel and open `index.html` in a browser. No web server is required).
in `DatabasePath` (Cloak will create this file for you if it didn't already exist).
1. On your client, run `ck-client -s <IP of the server> -l <A local port> -a <AdminUID> -c <path-to-ckclient.json>` to
enter admin mode
2. Visit https://cbeuw.github.io/Cloak-panel (Note: this is a pure-js static site, there is no backend and all data
entered into this site are processed between your browser and the Cloak API endpoint you specified. Alternatively you
can download the repo at https://github.com/cbeuw/Cloak-panel and open `index.html` in a browser. No web server is
required).
3. Type in `127.0.0.1:<the port you entered in step 1>` as the API Base, and click `List`. 3. Type in `127.0.0.1:<the port you entered in step 1>` as the API Base, and click `List`.
4. You can add in more users by clicking the `+` panel 4. You can add in more users by clicking the `+` panel
Note: the user database is persistent as it's on-disk. You don't need to add the users again each time you start Note: the user database is persistent as it's on-disk. You don't need to add the users again each time you start ck-server.
ck-server.
### Client
### Instructions for clients
**Android client is available here: https://github.com/cbeuw/Cloak-android** **Android client is available here: https://github.com/cbeuw/Cloak-android**
0. Install the underlying proxy client corresponding to what the server has. 0. Install the underlying proxy client corresponding to what the server has.
1. Download [the latest release](https://github.com/cbeuw/Cloak/releases) or clone and build this repo. 1. Download [the latest release](https://github.com/cbeuw/Cloak/releases) or clone and build this repo.
2. Obtain the public key and your UID from the administrator of your server 2. Obtain the public key and your UID from the administrator of your server
3. Copy `example_config/ckclient.json` into a location of your choice. Enter the `UID` and `PublicKey` you have 3. Copy `example_config/ckclient.json` into a location of your choice. Enter the `UID` and `PublicKey` you have obtained. Set `ProxyMethod` to match exactly the corresponding entry in `ProxyBook` on the server end
obtained. Set `ProxyMethod` to match exactly the corresponding entry in `ProxyBook` on the server end 4. [Configure the proxy program.](https://github.com/cbeuw/Cloak/wiki/Underlying-proxy-configuration-guides) Run `ck-client -c <path to ckclient.json> -s <ip of your server>`
4. [Configure the proxy program.](https://github.com/cbeuw/Cloak/wiki/Underlying-proxy-configuration-guides)
Run `ck-client -c <path to ckclient.json> -s <ip of your server>`
## Support me ## Support me
If you find this project useful, you can visit my [merch store](https://www.redbubble.com/people/cbeuw/explore); alternatively you can donate directly to me
If you find this project useful, you can visit my [merch store](https://www.redbubble.com/people/cbeuw/explore);
alternatively you can donate directly to me
[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=SAUYKGSREP8GL&source=url) [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=SAUYKGSREP8GL&source=url)

84
azure-pipelines.yml Normal file
View File

@ -0,0 +1,84 @@
# Go
# Build your Go project.
# Add steps that test, save build artifacts, deploy, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/go
trigger:
tags:
include:
- refs/tags/v*
branches:
exclude:
- master
pool:
vmImage: 'ubuntu-latest'
variables:
GOBIN: '$(GOPATH)/bin' # Go binaries path
GOROOT: '$(Agent.BuildDirectory)/go' # Go installation path
GOPATH: '$(Agent.BuildDirectory)/gopath' # Go workspace path
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code
steps:
- script: |
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
shopt -s extglob
shopt -s dotglob
mv !(gopath) '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
wget "https://golang.org/dl/go1.15.2.linux-amd64.tar.gz" --output-document "$(Agent.BuildDirectory)/go1.15.2.tar.gz"
tar -C '$(Agent.BuildDirectory)' -xzf "$(Agent.BuildDirectory)/go1.15.2.tar.gz"
displayName: 'Set up the Go workspace'
- script: |
go get github.com/mitchellh/gox
v="$(git describe --tags)"
output="{{.Dir}}-{{.OS}}-{{.Arch}}-$v"
osarch="!darwin/arm !darwin/arm64 !darwin/386"
echo "Compiling:"
os="windows linux darwin"
arch="amd64 386 arm arm64 mips mips64 mipsle mips64le"
pushd cmd/ck-client || exit 1
gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output"
GOOS="linux" GOARCH="mips" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mips_softfloat-"${v}"
GOOS="linux" GOARCH="mipsle" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mipsle_softfloat-"${v}"
mv ck-client-* $(Build.ArtifactStagingDirectory)/
os="linux"
arch="amd64 386 arm arm64"
pushd ../ck-server || exit 1
gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output"
mv ck-server-* $(Build.ArtifactStagingDirectory)/
workingDirectory: '$(modulePath)'
displayName: 'Get dependencies, then build'
# GitHub Release
# Create, edit, or delete a GitHub release
- task: GitHubRelease@0
inputs:
gitHubConnection: github.com_cbeuw
repositoryName: '$(Build.Repository.Name)'
action: 'create' # Options: create, edit, delete
target: '$(Build.SourceVersion)' # Required when action == Create || Action == Edit
tagSource: 'auto' # Required when action == Create# Options: auto, manual
#tagPattern: # Optional
#tag: "$(git describe --tags)" # Required when action == Edit || Action == Delete || TagSource == Manual
#title: # Optional
#releaseNotesSource: 'file' # Optional. Options: file, input
#releaseNotesFile: # Optional
#releaseNotes: # Optional
#assets: '$(Build.ArtifactStagingDirectory)/*' # Optional
#assetUploadMode: 'delete' # Optional. Options: delete, replace
#isDraft: false # Optional
#isPreRelease: false # Optional
addChangeLog: false # Optional
#compareWith: 'lastFullRelease' # Required when addChangeLog == True. Options: lastFullRelease, lastRelease, lastReleaseByTag
#releaseTag: # Required when compareWith == LastReleaseByTag

View File

@ -1,4 +1,3 @@
//go:build go1.11
// +build go1.11 // +build go1.11
package main package main
@ -8,11 +7,10 @@ import (
"encoding/binary" "encoding/binary"
"flag" "flag"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"net" "net"
"os" "os"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/client" "github.com/cbeuw/Cloak/internal/client"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -75,9 +73,6 @@ func main() {
log.Info("Starting standalone mode") log.Info("Starting standalone mode")
} }
log.SetFormatter(&log.TextFormatter{
FullTimestamp: true,
})
lvl, err := log.ParseLevel(*verbosity) lvl, err := log.ParseLevel(*verbosity)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@ -90,9 +85,7 @@ func main() {
} }
if ssPluginMode { if ssPluginMode {
if rawConfig.ProxyMethod == "" { rawConfig.ProxyMethod = "shadowsocks"
rawConfig.ProxyMethod = "shadowsocks"
}
// json takes precedence over environment variables // json takes precedence over environment variables
// i.e. if json field isn't empty, use that // i.e. if json field isn't empty, use that
if rawConfig.RemoteHost == "" { if rawConfig.RemoteHost == "" {
@ -174,12 +167,6 @@ func main() {
} }
log.Infof("Listening on %v %v for %v client", network, localConfig.LocalAddr, authInfo.ProxyMethod) log.Infof("Listening on %v %v for %v client", network, localConfig.LocalAddr, authInfo.ProxyMethod)
seshMaker = func() *mux.Session { seshMaker = func() *mux.Session {
authInfo := authInfo // copy the struct because we are overwriting SessionId
randByte := make([]byte, 1)
common.RandRead(authInfo.WorldState.Rand, randByte)
authInfo.MockDomain = localConfig.MockDomainList[int(randByte[0])%len(localConfig.MockDomainList)]
// sessionID is usergenerated. There shouldn't be a security concern because the scope of // sessionID is usergenerated. There shouldn't be a security concern because the scope of
// sessionID is limited to its UID. // sessionID is limited to its UID.
quad := make([]byte, 4) quad := make([]byte, 4)
@ -195,12 +182,12 @@ func main() {
return net.ListenUDP("udp", udpAddr) return net.ListenUDP("udp", udpAddr)
} }
client.RouteUDP(acceptor, localConfig.Timeout, remoteConfig.Singleplex, seshMaker) client.RouteUDP(acceptor, localConfig.Timeout, seshMaker)
} else { } else {
listener, err := net.Listen("tcp", localConfig.LocalAddr) listener, err := net.Listen("tcp", localConfig.LocalAddr)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
client.RouteTCP(listener, localConfig.Timeout, remoteConfig.Singleplex, seshMaker) client.RouteTCP(listener, localConfig.Timeout, seshMaker)
} }
} }

View File

@ -1,4 +1,3 @@
//go:build !android
// +build !android // +build !android
package main package main

View File

@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build android
// +build android // +build android
package main package main
@ -29,10 +28,9 @@ import "C"
import ( import (
"bufio" "bufio"
log "github.com/sirupsen/logrus"
"os" "os"
"unsafe" "unsafe"
log "github.com/sirupsen/logrus"
) )
var ( var (

View File

@ -1,4 +1,3 @@
//go:build !android
// +build !android // +build !android
package main package main

View File

@ -1,6 +1,4 @@
//go:build android
// +build android // +build android
package main package main
// Stolen from https://github.com/shadowsocks/overture/blob/shadowsocks/core/utils/utils_android.go // Stolen from https://github.com/shadowsocks/overture/blob/shadowsocks/core/utils/utils_android.go
@ -66,9 +64,8 @@ void set_timeout(int sock) {
import "C" import "C"
import ( import (
"syscall"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"syscall"
) )
// In Android, once an app starts the VpnService, all outgoing traffic are routed by the system // In Android, once an app starts the VpnService, all outgoing traffic are routed by the system

View File

@ -3,16 +3,15 @@ package main
import ( import (
"flag" "flag"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server"
log "github.com/sirupsen/logrus"
"net" "net"
"net/http" "net/http"
_ "net/http/pprof" _ "net/http/pprof"
"os" "os"
"runtime" "runtime"
"strings" "strings"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server"
log "github.com/sirupsen/logrus"
) )
var version string var version string
@ -73,10 +72,6 @@ func main() {
var pluginMode bool var pluginMode bool
log.SetFormatter(&log.TextFormatter{
FullTimestamp: true,
})
if os.Getenv("SS_LOCAL_HOST") != "" && os.Getenv("SS_LOCAL_PORT") != "" { if os.Getenv("SS_LOCAL_HOST") != "" && os.Getenv("SS_LOCAL_PORT") != "" {
pluginMode = true pluginMode = true
config = os.Getenv("SS_PLUGIN_OPTIONS") config = os.Getenv("SS_PLUGIN_OPTIONS")
@ -85,11 +80,8 @@ func main() {
askVersion := flag.Bool("v", false, "Print the version number") askVersion := flag.Bool("v", false, "Print the version number")
printUsage := flag.Bool("h", false, "Print this message") printUsage := flag.Bool("h", false, "Print this message")
genUIDScript := flag.Bool("u", false, "Generate a UID to STDOUT") genUID := flag.Bool("u", false, "Generate a UID")
genKeyPairScript := flag.Bool("k", false, "Generate a pair of public and private key and output to STDOUT in the format of <public key>,<private key>") genKeyPair := flag.Bool("k", false, "Generate a pair of public and private key, output in the format of pubkey,pvkey")
genUIDHuman := flag.Bool("uid", false, "Generate and print out a UID")
genKeyPairHuman := flag.Bool("key", false, "Generate and print out a public-private key pair")
pprofAddr := flag.String("d", "", "debug use: ip:port to be listened by pprof profiler") pprofAddr := flag.String("d", "", "debug use: ip:port to be listened by pprof profiler")
verbosity := flag.String("verbosity", "info", "verbosity level") verbosity := flag.String("verbosity", "info", "verbosity level")
@ -104,23 +96,13 @@ func main() {
flag.Usage() flag.Usage()
return return
} }
if *genUIDScript || *genUIDHuman { if *genUID {
uid := generateUID() fmt.Println(generateUID())
if *genUIDScript {
fmt.Println(uid)
} else {
fmt.Printf("\x1B[35mYour UID is:\u001B[0m %s\n", uid)
}
return return
} }
if *genKeyPairScript || *genKeyPairHuman { if *genKeyPair {
pub, pv := generateKeyPair() pub, pv := generateKeyPair()
if *genKeyPairScript { fmt.Printf("%v,%v", pub, pv)
fmt.Printf("%v,%v\n", pub, pv)
} else {
fmt.Printf("\x1B[36mYour PUBLIC key is:\x1B[0m %65s\n", pub)
fmt.Printf("\x1B[33mYour PRIVATE key is (keep it secret):\x1B[0m %47s\n", pv)
}
return return
} }

View File

@ -1,36 +1,57 @@
package main package main
import ( import (
"github.com/stretchr/testify/assert"
"net" "net"
"testing" "testing"
"github.com/stretchr/testify/assert"
) )
func TestParseBindAddr(t *testing.T) { func TestParseBindAddr(t *testing.T) {
t.Run("port only", func(t *testing.T) { t.Run("port only", func(t *testing.T) {
addrs, err := resolveBindAddr([]string{":443"}) addrs, err := resolveBindAddr([]string{":443"})
assert.NoError(t, err) if err != nil {
assert.Equal(t, ":443", addrs[0].String()) t.Error(err)
return
}
if addrs[0].String() != ":443" {
t.Errorf("expected %v got %v", ":443", addrs[0].String())
}
}) })
t.Run("specific address", func(t *testing.T) { t.Run("specific address", func(t *testing.T) {
addrs, err := resolveBindAddr([]string{"192.168.1.123:443"}) addrs, err := resolveBindAddr([]string{"192.168.1.123:443"})
assert.NoError(t, err) if err != nil {
assert.Equal(t, "192.168.1.123:443", addrs[0].String()) t.Error(err)
return
}
if addrs[0].String() != "192.168.1.123:443" {
t.Errorf("expected %v got %v", "192.168.1.123:443", addrs[0].String())
}
}) })
t.Run("ipv6", func(t *testing.T) { t.Run("ipv6", func(t *testing.T) {
addrs, err := resolveBindAddr([]string{"[::]:443"}) addrs, err := resolveBindAddr([]string{"[::]:443"})
assert.NoError(t, err) if err != nil {
assert.Equal(t, "[::]:443", addrs[0].String()) t.Error(err)
return
}
if addrs[0].String() != "[::]:443" {
t.Errorf("expected %v got %v", "[::]:443", addrs[0].String())
}
}) })
t.Run("mixed", func(t *testing.T) { t.Run("mixed", func(t *testing.T) {
addrs, err := resolveBindAddr([]string{":80", "[::]:443"}) addrs, err := resolveBindAddr([]string{":80", "[::]:443"})
assert.NoError(t, err) if err != nil {
assert.Equal(t, ":80", addrs[0].String()) t.Error(err)
assert.Equal(t, "[::]:443", addrs[1].String()) return
}
if addrs[0].String() != ":80" {
t.Errorf("expected %v got %v", ":80", addrs[0].String())
}
if addrs[1].String() != "[::]:443" {
t.Errorf("expected %v got %v", "[::]:443", addrs[1].String())
}
}) })
} }

View File

@ -3,7 +3,6 @@ package main
import ( import (
"crypto/rand" "crypto/rand"
"encoding/base64" "encoding/base64"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
) )

View File

@ -1,4 +0,0 @@
coverage:
status:
project: off
patch: off

View File

@ -2,8 +2,8 @@
"Transport": "direct", "Transport": "direct",
"ProxyMethod": "shadowsocks", "ProxyMethod": "shadowsocks",
"EncryptionMethod": "plain", "EncryptionMethod": "plain",
"UID": "---Your UID here---", "UID": "5nneblJy6lniPJfr81LuYQ==",
"PublicKey": "---Public key here---", "PublicKey": "IYoUzkle/T/kriE+Ufdm7AHQtIeGnBWbhhlTbmDpUUI=",
"ServerName": "www.bing.com", "ServerName": "www.bing.com",
"NumConn": 4, "NumConn": 4,
"BrowserSig": "chrome", "BrowserSig": "chrome",

View File

@ -18,10 +18,11 @@
":80" ":80"
], ],
"BypassUID": [ "BypassUID": [
"---Bypass UID here---" "1rmq6Ag1jZJCImLBIL5wzQ=="
], ],
"RedirAddr": "cloudflare.com", "RedirAddr": "204.79.197.200:443",
"PrivateKey": "---Private key here---", "PrivateKey": "EN5aPEpNBO+vw+BtFQY2OnK9bQU7rvEj5qmnmgwEtUc=",
"AdminUID": "---Admin UID here (optional)---", "AdminUID": "5nneblJy6lniPJfr81LuYQ==",
"DatabasePath": "userinfo.db" "DatabasePath": "userinfo.db",
"StreamTimeout": 300
} }

44
go.mod
View File

@ -1,30 +1,24 @@
module github.com/cbeuw/Cloak module github.com/cbeuw/Cloak
go 1.24.0 go 1.14
toolchain go1.24.2
require ( require (
github.com/cbeuw/connutil v0.0.0-20200411215123-966bfaa51ee3 github.com/cbeuw/connutil v0.0.0-20200411160121-c5a5c4a9de14
github.com/gorilla/mux v1.8.1 github.com/dvyukov/go-fuzz v0.0.0-20201003075337-90825f39c90b // indirect
github.com/gorilla/websocket v1.5.3 github.com/elazarl/go-bindata-assetfs v1.0.1 // indirect
github.com/juju/ratelimit v1.0.2 github.com/gorilla/mux v1.7.3
github.com/refraction-networking/utls v1.8.0 github.com/gorilla/websocket v1.4.1
github.com/sirupsen/logrus v1.9.3 github.com/juju/ratelimit v1.0.1
github.com/stretchr/testify v1.10.0 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
go.etcd.io/bbolt v1.4.0 github.com/kr/pretty v0.1.0 // indirect
golang.org/x/crypto v0.37.0 github.com/mitchellh/gox v1.0.1 // indirect
) github.com/refraction-networking/utls v0.0.0-20190909200633-43c36d3c1f57
github.com/sirupsen/logrus v1.5.0
require ( github.com/stephens2424/writerset v1.0.2 // indirect
github.com/andybalholm/brotli v1.1.1 // indirect github.com/stretchr/testify v1.3.0
github.com/cloudflare/circl v1.6.1 // indirect go.etcd.io/bbolt v1.3.4
github.com/davecgh/go-spew v1.1.1 // indirect golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
github.com/klauspost/compress v1.18.0 // indirect golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
github.com/kr/pretty v0.3.1 // indirect golang.org/x/tools v0.0.0-20201015182029-a5d9e455e9c4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
golang.org/x/sys v0.32.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
) )

125
go.sum
View File

@ -1,61 +1,80 @@
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/cbeuw/connutil v0.0.0-20200411160121-c5a5c4a9de14 h1:bWJKlzTJR7C9DX0l1qhkTaP1lTEBWVDKhg8C/tNJqKg=
github.com/cbeuw/connutil v0.0.0-20200411215123-966bfaa51ee3 h1:LRxW8pdmWmyhoNh+TxUjxsAinGtCsVGjsl3xg6zoRSs= github.com/cbeuw/connutil v0.0.0-20200411160121-c5a5c4a9de14/go.mod h1:6jR2SzckGv8hIIS9zWJ160mzGVVOYp4AXZMDtacL6LE=
github.com/cbeuw/connutil v0.0.0-20200411215123-966bfaa51ee3/go.mod h1:6jR2SzckGv8hIIS9zWJ160mzGVVOYp4AXZMDtacL6LE=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/dvyukov/go-fuzz v0.0.0-20201003075337-90825f39c90b h1:CXfDl9Y3NKuhOSxF9kXhiLmuYCdufQDrLY2fO1BzqBU=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/dvyukov/go-fuzz v0.0.0-20201003075337-90825f39c90b/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/refraction-networking/utls v1.6.6 h1:igFsYBUJPYM8Rno9xUuDoM5GQrVEqY4llzEXOkL43Ig= github.com/refraction-networking/utls v0.0.0-20190909200633-43c36d3c1f57 h1:SL1K0QAuC1b54KoY1pjPWe6kSlsFHwK9/oC960fKrTY=
github.com/refraction-networking/utls v1.6.6/go.mod h1:BC3O4vQzye5hqpmDTWUqi4P5DDhzJfkV1tdqtawQIH0= github.com/refraction-networking/utls v0.0.0-20190909200633-43c36d3c1f57/go.mod h1:tz9gX959MEFfFN5whTIocCLUG57WiILqtdVxI8c6Wj0=
github.com/refraction-networking/utls v1.7.0/go.mod h1:lV0Gwc1/Fi+HYH8hOtgFRdHfKo4FKSn6+FdyOz9hRms= github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
github.com/refraction-networking/utls v1.7.3 h1:L0WRhHY7Oq1T0zkdzVZMR6zWZv+sXbHB9zcuvsAEqCo= github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/refraction-networking/utls v1.7.3/go.mod h1:TUhh27RHMGtQvjQq+RyO11P6ZNQNBb3N0v7wsEjKAIQ= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/refraction-networking/utls v1.8.0 h1:L38krhiTAyj9EeiQQa2sg+hYb4qwLCqdMcpZrRfbONE= github.com/stephens2424/writerset v1.0.2 h1:znRLgU6g8RS5euYRcy004XeE4W+Tu44kALzy7ghPif8=
github.com/refraction-networking/utls v1.8.0/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201015182029-a5d9e455e9c4 h1:rQWkJiVIyJ3PgiSHL+RXc8xbrK8duU6jG5eeZ9G7nk8=
golang.org/x/tools v0.0.0-20201015182029-a5d9e455e9c4/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -1,11 +1,10 @@
package client package client
import ( import (
"encoding/binary"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
utls "github.com/refraction-networking/utls"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"net" "net"
"strings"
) )
const appDataMaxLength = 16401 const appDataMaxLength = 16401
@ -14,125 +13,59 @@ type clientHelloFields struct {
random []byte random []byte
sessionId []byte sessionId []byte
x25519KeyShare []byte x25519KeyShare []byte
serverName string sni []byte
} }
type browser int type browser interface {
composeClientHello(clientHelloFields) []byte
}
const ( func makeServerName(serverName string) []byte {
chrome = iota serverNameListLength := make([]byte, 2)
firefox binary.BigEndian.PutUint16(serverNameListLength, uint16(len(serverName)+3))
safari serverNameType := []byte{0x00} // host_name
) serverNameLength := make([]byte, 2)
binary.BigEndian.PutUint16(serverNameLength, uint16(len(serverName)))
ret := make([]byte, 2+1+2+len(serverName))
copy(ret[0:2], serverNameListLength)
copy(ret[2:3], serverNameType)
copy(ret[3:5], serverNameLength)
copy(ret[5:], serverName)
return ret
}
// addExtensionRecord, add type, length to extension data
func addExtRec(typ []byte, data []byte) []byte {
length := make([]byte, 2)
binary.BigEndian.PutUint16(length, uint16(len(data)))
ret := make([]byte, 2+2+len(data))
copy(ret[0:2], typ)
copy(ret[2:4], length)
copy(ret[4:], data)
return ret
}
func genStegClientHello(ai authenticationPayload, serverName string) (ret clientHelloFields) {
// random is marshalled ephemeral pub key 32 bytes
// The authentication ciphertext and its tag are then distributed among SessionId and X25519KeyShare
ret.random = ai.randPubKey[:]
ret.sessionId = ai.ciphertextWithTag[0:32]
ret.x25519KeyShare = ai.ciphertextWithTag[32:64]
ret.sni = makeServerName(serverName)
return
}
type DirectTLS struct { type DirectTLS struct {
*common.TLSConn *common.TLSConn
browser browser browser browser
} }
var topLevelDomains = []string{"com", "net", "org", "it", "fr", "me", "ru", "cn", "es", "tr", "top", "xyz", "info"} // NewClientTransport handles the TLS handshake for a given conn and returns the sessionKey
func randomServerName() string {
/*
Copyright: Proton AG
https://github.com/ProtonVPN/wireguard-go/commit/bcf344b39b213c1f32147851af0d2a8da9266883
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
charNum := int('z') - int('a') + 1
size := 3 + common.RandInt(10)
name := make([]byte, size)
for i := range name {
name[i] = byte(int('a') + common.RandInt(charNum))
}
return string(name) + "." + common.RandItem(topLevelDomains)
}
func buildClientHello(browser browser, fields clientHelloFields) ([]byte, error) {
// We don't use utls to handle connections (as it'll attempt a real TLS negotiation)
// We only want it to build the ClientHello locally
fakeConn := net.TCPConn{}
var helloID utls.ClientHelloID
switch browser {
case chrome:
helloID = utls.HelloChrome_Auto
case firefox:
helloID = utls.HelloFirefox_Auto
case safari:
helloID = utls.HelloSafari_Auto
}
uclient := utls.UClient(&fakeConn, &utls.Config{ServerName: fields.serverName}, helloID)
if err := uclient.BuildHandshakeState(); err != nil {
return []byte{}, err
}
if err := uclient.SetClientRandom(fields.random); err != nil {
return []byte{}, err
}
uclient.HandshakeState.Hello.SessionId = make([]byte, 32)
copy(uclient.HandshakeState.Hello.SessionId, fields.sessionId)
// Find the X25519 key share and overwrite it
var extIndex int
var keyShareIndex int
for i, ext := range uclient.Extensions {
ext, ok := ext.(*utls.KeyShareExtension)
if ok {
extIndex = i
for j, keyShare := range ext.KeyShares {
if keyShare.Group == utls.X25519 {
keyShareIndex = j
}
}
}
}
copy(uclient.Extensions[extIndex].(*utls.KeyShareExtension).KeyShares[keyShareIndex].Data, fields.x25519KeyShare)
if err := uclient.BuildHandshakeState(); err != nil {
return []byte{}, err
}
return uclient.HandshakeState.Hello.Raw, nil
}
// Handshake handles the TLS handshake for a given conn and returns the sessionKey
// if the server proceed with Cloak authentication // if the server proceed with Cloak authentication
func (tls *DirectTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) { func (tls *DirectTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) {
payload, sharedSecret := makeAuthenticationPayload(authInfo) payload, sharedSecret := makeAuthenticationPayload(authInfo)
chOnly := tls.browser.composeClientHello(genStegClientHello(payload, authInfo.MockDomain))
fields := clientHelloFields{ chWithRecordLayer := common.AddRecordLayer(chOnly, common.Handshake, common.VersionTLS11)
random: payload.randPubKey[:],
sessionId: payload.ciphertextWithTag[0:32],
x25519KeyShare: payload.ciphertextWithTag[32:64],
serverName: authInfo.MockDomain,
}
if strings.EqualFold(fields.serverName, "random") {
fields.serverName = randomServerName()
}
var ch []byte
ch, err = buildClientHello(tls.browser, fields)
if err != nil {
return
}
chWithRecordLayer := common.AddRecordLayer(ch, common.Handshake, common.VersionTLS11)
_, err = rawConn.Write(chWithRecordLayer) _, err = rawConn.Write(chWithRecordLayer)
if err != nil { if err != nil {
return return

View File

@ -0,0 +1,43 @@
package client
import (
"bytes"
"encoding/hex"
"testing"
)
func htob(s string) []byte {
b, _ := hex.DecodeString(s)
return b
}
func TestMakeServerName(t *testing.T) {
type testingPair struct {
serverName string
target []byte
}
pairs := []testingPair{
{
"www.google.com",
htob("001100000e7777772e676f6f676c652e636f6d"),
},
{
"www.gstatic.com",
htob("001200000f7777772e677374617469632e636f6d"),
},
{
"googleads.g.doubleclick.net",
htob("001e00001b676f6f676c656164732e672e646f75626c65636c69636b2e6e6574"),
},
}
for _, p := range pairs {
if !bytes.Equal(makeServerName(p.serverName), p.target) {
t.Error(
"for", p.serverName,
"expecting", p.target,
"got", makeServerName(p.serverName))
}
}
}

View File

@ -2,10 +2,8 @@ package client
import ( import (
"encoding/binary" "encoding/binary"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
log "github.com/sirupsen/logrus"
) )
const ( const (
@ -28,28 +26,21 @@ func makeAuthenticationPayload(authInfo AuthInfo) (ret authenticationPayload, sh
| 16 bytes | 12 bytes | 1 byte | 8 bytes | 4 bytes | 1 byte | 6 bytes | | 16 bytes | 12 bytes | 1 byte | 8 bytes | 4 bytes | 1 byte | 6 bytes |
+----------+----------------+---------------------+-------------+--------------+--------+------------+ +----------+----------------+---------------------+-------------+--------------+--------+------------+
*/ */
ephPv, ephPub, err := ecdh.GenerateKey(authInfo.WorldState.Rand) ephPv, ephPub, _ := ecdh.GenerateKey(authInfo.WorldState.Rand)
if err != nil {
log.Panicf("failed to generate ephemeral key pair: %v", err)
}
copy(ret.randPubKey[:], ecdh.Marshal(ephPub)) copy(ret.randPubKey[:], ecdh.Marshal(ephPub))
plaintext := make([]byte, 48) plaintext := make([]byte, 48)
copy(plaintext, authInfo.UID) copy(plaintext, authInfo.UID)
copy(plaintext[16:28], authInfo.ProxyMethod) copy(plaintext[16:28], authInfo.ProxyMethod)
plaintext[28] = authInfo.EncryptionMethod plaintext[28] = authInfo.EncryptionMethod
binary.BigEndian.PutUint64(plaintext[29:37], uint64(authInfo.WorldState.Now().UTC().Unix())) binary.BigEndian.PutUint64(plaintext[29:37], uint64(authInfo.WorldState.Now().Unix()))
binary.BigEndian.PutUint32(plaintext[37:41], authInfo.SessionId) binary.BigEndian.PutUint32(plaintext[37:41], authInfo.SessionId)
if authInfo.Unordered { if authInfo.Unordered {
plaintext[41] |= UNORDERED_FLAG plaintext[41] |= UNORDERED_FLAG
} }
secret, err := ecdh.GenerateSharedSecret(ephPv, authInfo.ServerPubKey) copy(sharedSecret[:], ecdh.GenerateSharedSecret(ephPv, authInfo.ServerPubKey))
if err != nil {
log.Panicf("error in generating shared secret: %v", err)
}
copy(sharedSecret[:], secret)
ciphertextWithTag, _ := common.AESGCMEncrypt(ret.randPubKey[:12], sharedSecret[:], plaintext) ciphertextWithTag, _ := common.AESGCMEncrypt(ret.randPubKey[:12], sharedSecret[:], plaintext)
copy(ret.ciphertextWithTag[:], ciphertextWithTag[:]) copy(ret.ciphertextWithTag[:], ciphertextWithTag[:])
return return

View File

@ -2,12 +2,10 @@ package client
import ( import (
"bytes" "bytes"
"testing"
"time"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/multiplex" "github.com/cbeuw/Cloak/internal/multiplex"
"github.com/stretchr/testify/assert" "testing"
"time"
) )
func TestMakeAuthenticationPayload(t *testing.T) { func TestMakeAuthenticationPayload(t *testing.T) {
@ -29,7 +27,7 @@ func TestMakeAuthenticationPayload(t *testing.T) {
0x01, 0xd0, 0xb4, 0x87, 0x86, 0x9c, 0x15, 0x9b, 0x01, 0xd0, 0xb4, 0x87, 0x86, 0x9c, 0x15, 0x9b,
0x86, 0x19, 0x53, 0x6e, 0x60, 0xe9, 0x51, 0x42}, 0x86, 0x19, 0x53, 0x6e, 0x60, 0xe9, 0x51, 0x42},
ProxyMethod: "shadowsocks", ProxyMethod: "shadowsocks",
EncryptionMethod: multiplex.EncryptionMethodPlain, EncryptionMethod: multiplex.E_METHOD_PLAIN,
MockDomain: "d2jkinvisak5y9.cloudfront.net", MockDomain: "d2jkinvisak5y9.cloudfront.net",
WorldState: common.WorldState{ WorldState: common.WorldState{
Rand: bytes.NewBuffer([]byte{ Rand: bytes.NewBuffer([]byte{
@ -66,8 +64,12 @@ func TestMakeAuthenticationPayload(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
func() { func() {
payload, sharedSecret := makeAuthenticationPayload(tc.authInfo) payload, sharedSecret := makeAuthenticationPayload(tc.authInfo)
assert.Equal(t, tc.expPayload, payload, "payload doesn't match") if payload != tc.expPayload {
assert.Equal(t, tc.expSecret, sharedSecret, "shared secret doesn't match") t.Errorf("payload doesn't match:\nexp %v\ngot %v", tc.expPayload, payload)
}
if sharedSecret != tc.expSecret {
t.Errorf("secret doesn't match:\nexp %x\ngot %x", tc.expPayload, payload)
}
}() }()
} }
} }

103
internal/client/chrome.go Normal file
View File

@ -0,0 +1,103 @@
// Fingerprint of Chrome 85
package client
import (
"encoding/binary"
"encoding/hex"
"github.com/cbeuw/Cloak/internal/common"
)
type Chrome struct{}
// makeGREASE returns a two-byte GREASE value as sent by Chrome.
// See https://tools.ietf.org/html/draft-davidben-tls-grease-01: valid GREASE
// values have both bytes identical and of the form 0xNA (low nibble 0xA),
// with the high nibble N chosen at random. This is exclusive to Chrome.
func makeGREASE() []byte {
	var seed [1]byte
	common.CryptoRandRead(seed[:])
	greaseByte := (seed[0]%16)*16 + 0xA
	return []byte{greaseByte, greaseByte}
}
// composeExtensions builds the TLS ClientHello extensions block mimicking
// Chrome 85's fingerprint. sni is a pre-encoded server_name extension body
// (as produced by makeServerName) and keyShare is the 32-byte x25519 public
// key to embed in the key_share extension. The final padding extension is
// sized against len(ext[1]) so the total extensions length stays constant
// regardless of the server name's length.
func (c *Chrome) composeExtensions(sni []byte, keyShare []byte) []byte {
	// supported_groups body: GREASE, x25519, secp256r1, secp384r1
	makeSupportedGroups := func() []byte {
		suppGroupListLen := []byte{0x00, 0x08}
		ret := make([]byte, 2+8)
		copy(ret[0:2], suppGroupListLen)
		copy(ret[2:4], makeGREASE())
		copy(ret[4:], []byte{0x00, 0x1d, 0x00, 0x17, 0x00, 0x18})
		return ret
	}
	// key_share body: a GREASE entry with a 1-byte placeholder, then the
	// real x25519 entry carrying the 32-byte key ("hidden" payload)
	makeKeyShare := func(hidden []byte) []byte {
		ret := make([]byte, 43)
		ret[0], ret[1] = 0x00, 0x29 // length 41
		copy(ret[2:4], makeGREASE())
		ret[4], ret[5] = 0x00, 0x01 // length 1
		ret[6] = 0x00
		ret[7], ret[8] = 0x00, 0x1d // group x25519
		ret[9], ret[10] = 0x00, 0x20 // length 32
		copy(ret[11:43], hidden)
		return ret
	}
	// extension length is always 403, and server name length is variable
	var ext [17][]byte
	ext[0] = addExtRec(makeGREASE(), nil)                                        // First GREASE
	ext[1] = addExtRec([]byte{0x00, 0x00}, sni)                                  // server name indication
	ext[2] = addExtRec([]byte{0x00, 0x17}, nil)                                  // extended_master_secret
	ext[3] = addExtRec([]byte{0xff, 0x01}, []byte{0x00})                         // renegotiation_info
	ext[4] = addExtRec([]byte{0x00, 0x0a}, makeSupportedGroups())                // supported groups
	ext[5] = addExtRec([]byte{0x00, 0x0b}, []byte{0x01, 0x00})                   // ec point formats
	ext[6] = addExtRec([]byte{0x00, 0x23}, nil)                                  // Session tickets
	APLN, _ := hex.DecodeString("000c02683208687474702f312e31")                  // h2, http/1.1
	ext[7] = addExtRec([]byte{0x00, 0x10}, APLN)                                 // app layer proto negotiation
	ext[8] = addExtRec([]byte{0x00, 0x05}, []byte{0x01, 0x00, 0x00, 0x00, 0x00}) // status request
	sigAlgo, _ := hex.DecodeString("001004030804040105030805050108060601")
	ext[9] = addExtRec([]byte{0x00, 0x0d}, sigAlgo)                   // Signature Algorithms
	ext[10] = addExtRec([]byte{0x00, 0x12}, nil)                      // signed cert timestamp
	ext[11] = addExtRec([]byte{0x00, 0x33}, makeKeyShare(keyShare))   // key share
	ext[12] = addExtRec([]byte{0x00, 0x2d}, []byte{0x01, 0x01})       // psk key exchange modes
	suppVersions, _ := hex.DecodeString("0a9A9A0304030303020301")     // 9A9A needs to be a GREASE
	copy(suppVersions[1:3], makeGREASE())                             // overwrite the 9A9A placeholder
	ext[13] = addExtRec([]byte{0x00, 0x2b}, suppVersions)             // supported versions
	ext[14] = addExtRec([]byte{0x00, 0x1b}, []byte{0x02, 0x00, 0x02}) // compress certificate
	ext[15] = addExtRec(makeGREASE(), []byte{0x00})                   // Last GREASE
	// Padding keeps the total extensions length fixed:
	// len(ext[1]) + 170 + len(ext[16]) = 403
	// len(ext[16]) = 233 - len(ext[1])
	// 2+2+len(padding) = 233 - len(ext[1])
	// len(padding) = 229 - len(ext[1])
	ext[16] = addExtRec([]byte{0x00, 0x15}, make([]byte, 229-len(ext[1]))) // padding
	var ret []byte
	for _, e := range ext {
		ret = append(ret, e...)
	}
	return ret
}
// composeClientHello assembles a complete TLS ClientHello handshake message
// (without the record layer) mimicking Chrome 85, embedding the caller's
// random, session id, SNI and x25519 key share from hd.
func (c *Chrome) composeClientHello(hd clientHelloFields) (ch []byte) {
	var clientHello [12][]byte
	clientHello[0] = []byte{0x01}             // handshake type
	clientHello[1] = []byte{0x00, 0x01, 0xfc} // length 508
	clientHello[2] = []byte{0x03, 0x03}       // client version
	clientHello[3] = hd.random                // random
	clientHello[4] = []byte{0x20}             // session id length 32
	clientHello[5] = hd.sessionId             // session id
	clientHello[6] = []byte{0x00, 0x20}       // cipher suites length 34
	cipherSuites, _ := hex.DecodeString("130113021303c02bc02fc02cc030cca9cca8c013c014009c009d002f0035")
	clientHello[7] = append(makeGREASE(), cipherSuites...) // cipher suites, led by a GREASE suite
	clientHello[8] = []byte{0x01}                          // compression methods length 1
	clientHello[9] = []byte{0x00}                          // compression methods
	// extensions are built first so their actual length can be written
	// into the 2-byte length field at index 10 below
	clientHello[11] = c.composeExtensions(hd.sni, hd.x25519KeyShare)
	clientHello[10] = []byte{0x00, 0x00} // extensions length 403
	binary.BigEndian.PutUint16(clientHello[10], uint16(len(clientHello[11])))
	var ret []byte
	for _, c := range clientHello {
		ret = append(ret, c...)
	}
	return ret
}

View File

@ -0,0 +1,48 @@
package client
import (
"encoding/hex"
"testing"
)
// TestMakeGREASE verifies the GREASE wire shape (both hex digits end in 'a',
// i.e. each byte's low nibble is 0xA) and sanity-checks randomness: out of 50
// further samples, more than 40 identical to the first is treated as failure.
func TestMakeGREASE(t *testing.T) {
	first := hex.EncodeToString(makeGREASE())
	if first[1] != 'a' || first[3] != 'a' {
		t.Errorf("GREASE got %v", first)
	}
	samples := make([]string, 0, 50)
	for i := 0; i < 50; i++ {
		samples = append(samples, hex.EncodeToString(makeGREASE()))
	}
	matches := 0
	for _, s := range samples {
		if s == first {
			matches++
		}
	}
	if matches > 40 {
		t.Error("GREASE is not random", samples)
	}
}
// TestComposeExtension compares Chrome's composed extensions against a known
// reference capture byte-by-byte. GREASE bytes are random per call, so a
// mismatch is tolerated when both the produced and the reference byte look
// like GREASE (low nibble 0xA) AND the byte is one half of a doubled pair
// (GREASE values are two identical bytes) on both sides.
func TestComposeExtension(t *testing.T) {
	serverName := "github.com"
	keyShare, _ := hex.DecodeString("690f074f5c01756982269b66d58c90c47dc0f281d654c7b2c16f63c9033f5604")
	sni := makeServerName(serverName)
	result := (&Chrome{}).composeExtensions(sni, keyShare)
	target, _ := hex.DecodeString("8a8a00000000000f000d00000a6769746875622e636f6d00170000ff01000100000a000a00088a8a001d00170018000b00020100002300000010000e000c02683208687474702f312e31000500050100000000000d0012001004030804040105030805050108060601001200000033002b00298a8a000100001d0020690f074f5c01756982269b66d58c90c47dc0f281d654c7b2c16f63c9033f5604002d00020101002b000b0a3a3a0304030303020301001b00030200024a4a000100001500d2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	for p := 0; p < len(result); p++ {
		if result[p] != target[p] {
			// skip mismatches explainable as differing random GREASE values:
			// both bytes end in nibble 0xA and repeat their neighbour
			if result[p]&0x0F == 0xA && target[p]&0x0F == 0xA &&
				((p > 0 && result[p-1] == result[p] && target[p-1] == target[p]) ||
					(p < len(result)-1 && result[p+1] == result[p] && target[p+1] == target[p])) {
				continue
			}
			t.Errorf("inequality at %v", p)
		}
	}
}

View File

@ -1,13 +1,12 @@
package client package client
import ( import (
"github.com/cbeuw/Cloak/internal/common"
"net" "net"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -21,10 +20,8 @@ func MakeSession(connConfig RemoteConnConfig, authInfo AuthInfo, dialer common.D
var wg sync.WaitGroup var wg sync.WaitGroup
for i := 0; i < connConfig.NumConn; i++ { for i := 0; i < connConfig.NumConn; i++ {
wg.Add(1) wg.Add(1)
transportConfig := connConfig.Transport
go func() { go func() {
makeconn: makeconn:
transportConn := transportConfig.CreateTransport()
remoteConn, err := dialer.Dial("tcp", connConfig.RemoteAddr) remoteConn, err := dialer.Dial("tcp", connConfig.RemoteAddr)
if err != nil { if err != nil {
log.Errorf("Failed to establish new connections to remote: %v", err) log.Errorf("Failed to establish new connections to remote: %v", err)
@ -33,20 +30,12 @@ func MakeSession(connConfig RemoteConnConfig, authInfo AuthInfo, dialer common.D
goto makeconn goto makeconn
} }
transportConn := connConfig.TransportMaker()
sk, err := transportConn.Handshake(remoteConn, authInfo) sk, err := transportConn.Handshake(remoteConn, authInfo)
if err != nil { if err != nil {
log.Errorf("Failed to prepare connection to remote: %v", err)
transportConn.Close() transportConn.Close()
log.Errorf("Failed to prepare connection to remote: %v", err)
// In Cloak v2.11.0, we've updated uTLS version and subsequently increased the first packet size for chrome above 1500
// https://github.com/cbeuw/Cloak/pull/306#issuecomment-2862728738. As a backwards compatibility feature, if we fail
// to connect using chrome signature, retry with firefox which has a smaller packet size.
if transportConfig.mode == "direct" && transportConfig.browser == chrome {
transportConfig.browser = firefox
log.Warnf("failed to connect with chrome signature, falling back to retry with firefox")
}
time.Sleep(time.Second * 3) time.Sleep(time.Second * 3)
goto makeconn goto makeconn
} }
// sessionKey given by each connection should be identical // sessionKey given by each connection should be identical

View File

@ -0,0 +1,77 @@
// Fingerprint of Firefox 68
package client
import (
"encoding/binary"
"encoding/hex"
"github.com/cbeuw/Cloak/internal/common"
)
type Firefox struct{}
// composeExtensions builds the TLS ClientHello extensions block mimicking
// Firefox 68. SNI is a pre-encoded server_name extension body and keyShare
// is the 32-byte x25519 public key to hide in the key_share extension; a
// random secp256r1 key share is generated alongside it for realism.
func (f *Firefox) composeExtensions(SNI []byte, keyShare []byte) []byte {
	// key_share body: x25519 entry carrying the hidden key, followed by a
	// freshly randomised 65-byte secp256r1 entry
	composeKeyShare := func(hidden []byte) []byte {
		ret := make([]byte, 107)
		ret[0], ret[1] = 0x00, 0x69 // length 105
		ret[2], ret[3] = 0x00, 0x1d // group x25519
		ret[4], ret[5] = 0x00, 0x20 // length 32
		copy(ret[6:38], hidden)
		ret[38], ret[39] = 0x00, 0x17 // group secp256r1
		ret[40], ret[41] = 0x00, 0x41 // length 65
		common.CryptoRandRead(ret[42:107])
		return ret
	}
	// extension length is always 399, and server name length is variable
	var ext [14][]byte
	ext[0] = addExtRec([]byte{0x00, 0x00}, SNI)          // server name indication
	ext[1] = addExtRec([]byte{0x00, 0x17}, nil)          // extended_master_secret
	ext[2] = addExtRec([]byte{0xff, 0x01}, []byte{0x00}) // renegotiation_info
	suppGroup, _ := hex.DecodeString("000c001d00170018001901000101")
	ext[3] = addExtRec([]byte{0x00, 0x0a}, suppGroup)          // supported groups
	ext[4] = addExtRec([]byte{0x00, 0x0b}, []byte{0x01, 0x00}) // ec point formats
	ext[5] = addExtRec([]byte{0x00, 0x23}, []byte{})           // Session tickets
	APLN, _ := hex.DecodeString("000c02683208687474702f312e31") // h2, http/1.1
	ext[6] = addExtRec([]byte{0x00, 0x10}, APLN)                                 // app layer proto negotiation
	ext[7] = addExtRec([]byte{0x00, 0x05}, []byte{0x01, 0x00, 0x00, 0x00, 0x00}) // status request
	ext[8] = addExtRec([]byte{0x00, 0x33}, composeKeyShare(keyShare))            // key share
	suppVersions, _ := hex.DecodeString("080304030303020301")
	ext[9] = addExtRec([]byte{0x00, 0x2b}, suppVersions) // supported versions
	sigAlgo, _ := hex.DecodeString("001604030503060308040805080604010501060102030201")
	ext[10] = addExtRec([]byte{0x00, 0x0d}, sigAlgo)            // Signature Algorithms
	ext[11] = addExtRec([]byte{0x00, 0x2d}, []byte{0x01, 0x01}) // psk key exchange modes
	ext[12] = addExtRec([]byte{0x00, 0x1c}, []byte{0x40, 0x01}) // record size limit
	// Padding keeps the total extensions length fixed regardless of SNI
	// length; the padding body itself is 163-len(SNI) bytes.
	// NOTE(review): the original derivation comment here claimed
	// len(padding) = 158 - len(ext[0]), which does not match the code's
	// 163-len(SNI) (ext[0] is 4+len(SNI) bytes) — verify against the
	// reference capture in the tests.
	ext[13] = addExtRec([]byte{0x00, 0x15}, make([]byte, 163-len(SNI))) // padding
	var ret []byte
	for _, e := range ext {
		ret = append(ret, e...)
	}
	return ret
}
// composeClientHello assembles a complete TLS ClientHello handshake message
// (without the record layer) mimicking Firefox 68, embedding the caller's
// random, session id, SNI and x25519 key share from hd.
func (f *Firefox) composeClientHello(hd clientHelloFields) (ch []byte) {
	var clientHello [12][]byte
	clientHello[0] = []byte{0x01}             // handshake type
	clientHello[1] = []byte{0x00, 0x01, 0xfc} // length 508
	clientHello[2] = []byte{0x03, 0x03}       // client version
	clientHello[3] = hd.random                // random
	clientHello[4] = []byte{0x20}             // session id length 32
	clientHello[5] = hd.sessionId             // session id
	clientHello[6] = []byte{0x00, 0x24}       // cipher suites length 36
	cipherSuites, _ := hex.DecodeString("130113031302c02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a")
	clientHello[7] = cipherSuites // cipher suites
	clientHello[8] = []byte{0x01} // compression methods length 1
	clientHello[9] = []byte{0x00} // compression methods
	// extensions are built first so their actual length can be written
	// into the 2-byte length field at index 10 below
	clientHello[11] = f.composeExtensions(hd.sni, hd.x25519KeyShare)
	clientHello[10] = []byte{0x00, 0x00} // extensions length
	binary.BigEndian.PutUint16(clientHello[10], uint16(len(clientHello[11])))
	var ret []byte
	for _, c := range clientHello {
		ret = append(ret, c...)
	}
	return ret
}

View File

@ -0,0 +1,20 @@
package client
import (
"bytes"
"encoding/hex"
"testing"
)
func TestComposeExtensions(t *testing.T) {
target, _ := hex.DecodeString("000000170015000012636f6e73656e742e676f6f676c652e636f6d00170000ff01000100000a000e000c001d00170018001901000101000b00020100002300000010000e000c02683208687474702f312e310005000501000000000033006b0069001d00206075db0a43812b2e4e0f44157f04295b484ccfc6d70e577c1e6113aa18e088270017004104948052ae52043e654641660ebbadb527c8280262e61f64b0f6f1794f32e1000865a49e4cbe2027c78e7180861e4336300815fa0f1b0091c4d788b97f809a47d3002b0009080304030303020301000d0018001604030503060308040805080604010501060102030201002d00020101001c000240010015008c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
serverName := "consent.google.com"
keyShare, _ := hex.DecodeString("6075db0a43812b2e4e0f44157f04295b484ccfc6d70e577c1e6113aa18e08827")
sni := makeServerName(serverName)
result := (&Firefox{}).composeExtensions(sni, keyShare)
// skip random secp256r1
if !bytes.Equal(result[:137], target[:137]) || !bytes.Equal(result[202:], target[202:]) {
t.Errorf("got %x", result)
}
}

View File

@ -1,18 +1,16 @@
package client package client
import ( import (
"github.com/cbeuw/Cloak/internal/common"
"io" "io"
"net" "net"
"sync"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
func RouteUDP(bindFunc func() (*net.UDPConn, error), streamTimeout time.Duration, singleplex bool, newSeshFunc func() *mux.Session) { func RouteUDP(bindFunc func() (*net.UDPConn, error), streamTimeout time.Duration, newSeshFunc func() *mux.Session) {
var sesh *mux.Session var sesh *mux.Session
localConn, err := bindFunc() localConn, err := bindFunc()
if err != nil { if err != nil {
@ -20,7 +18,6 @@ func RouteUDP(bindFunc func() (*net.UDPConn, error), streamTimeout time.Duration
} }
streams := make(map[string]*mux.Stream) streams := make(map[string]*mux.Stream)
var streamsMutex sync.Mutex
data := make([]byte, 8192) data := make([]byte, 8192)
for { for {
@ -30,31 +27,22 @@ func RouteUDP(bindFunc func() (*net.UDPConn, error), streamTimeout time.Duration
continue continue
} }
if !singleplex && (sesh == nil || sesh.IsClosed()) { if sesh == nil || sesh.IsClosed() || sesh.Singleplex {
sesh = newSeshFunc() sesh = newSeshFunc()
} }
streamsMutex.Lock()
stream, ok := streams[addr.String()] stream, ok := streams[addr.String()]
if !ok { if !ok {
if singleplex {
sesh = newSeshFunc()
}
stream, err = sesh.OpenStream() stream, err = sesh.OpenStream()
if err != nil { if err != nil {
if singleplex { log.Errorf("Failed to open stream: %v", err)
if sesh.Singleplex {
sesh.Close() sesh.Close()
} }
log.Errorf("Failed to open stream: %v", err)
streamsMutex.Unlock()
continue continue
} }
streams[addr.String()] = stream streams[addr.String()] = stream
streamsMutex.Unlock()
_ = stream.SetReadDeadline(time.Now().Add(streamTimeout))
proxyAddr := addr proxyAddr := addr
go func(stream *mux.Stream, localConn *net.UDPConn) { go func(stream *mux.Stream, localConn *net.UDPConn) {
buf := make([]byte, 8192) buf := make([]byte, 8192)
@ -62,40 +50,31 @@ func RouteUDP(bindFunc func() (*net.UDPConn, error), streamTimeout time.Duration
n, err := stream.Read(buf) n, err := stream.Read(buf)
if err != nil { if err != nil {
log.Tracef("copying stream to proxy client: %v", err) log.Tracef("copying stream to proxy client: %v", err)
break stream.Close()
return
} }
_ = stream.SetReadDeadline(time.Now().Add(streamTimeout))
_, err = localConn.WriteTo(buf[:n], proxyAddr) _, err = localConn.WriteTo(buf[:n], proxyAddr)
if err != nil { if err != nil {
log.Tracef("copying stream to proxy client: %v", err) log.Tracef("copying stream to proxy client: %v", err)
break stream.Close()
return
} }
} }
streamsMutex.Lock()
delete(streams, addr.String())
streamsMutex.Unlock()
stream.Close()
return
}(stream, localConn) }(stream, localConn)
} else {
streamsMutex.Unlock()
} }
_, err = stream.Write(data[:i]) _, err = stream.Write(data[:i])
if err != nil { if err != nil {
log.Tracef("copying proxy client to stream: %v", err) log.Tracef("copying proxy client to stream: %v", err)
streamsMutex.Lock()
delete(streams, addr.String()) delete(streams, addr.String())
streamsMutex.Unlock()
stream.Close() stream.Close()
continue continue
} }
_ = stream.SetReadDeadline(time.Now().Add(streamTimeout))
} }
} }
func RouteTCP(listener net.Listener, streamTimeout time.Duration, singleplex bool, newSeshFunc func() *mux.Session) { func RouteTCP(listener net.Listener, streamTimeout time.Duration, newSeshFunc func() *mux.Session) {
var sesh *mux.Session var sesh *mux.Session
for { for {
localConn, err := listener.Accept() localConn, err := listener.Accept()
@ -103,30 +82,23 @@ func RouteTCP(listener net.Listener, streamTimeout time.Duration, singleplex boo
log.Fatal(err) log.Fatal(err)
continue continue
} }
if !singleplex && (sesh == nil || sesh.IsClosed()) { if sesh == nil || sesh.IsClosed() || sesh.Singleplex {
sesh = newSeshFunc() sesh = newSeshFunc()
} }
go func(sesh *mux.Session, localConn net.Conn, timeout time.Duration) { go func(sesh *mux.Session, localConn net.Conn) {
if singleplex {
sesh = newSeshFunc()
}
data := make([]byte, 10240) data := make([]byte, 10240)
_ = localConn.SetReadDeadline(time.Now().Add(streamTimeout))
i, err := io.ReadAtLeast(localConn, data, 1) i, err := io.ReadAtLeast(localConn, data, 1)
if err != nil { if err != nil {
log.Errorf("Failed to read first packet from proxy client: %v", err) log.Errorf("Failed to read first packet from proxy client: %v", err)
localConn.Close() localConn.Close()
return return
} }
var zeroTime time.Time
_ = localConn.SetReadDeadline(zeroTime)
stream, err := sesh.OpenStream() stream, err := sesh.OpenStream()
if err != nil { if err != nil {
log.Errorf("Failed to open stream: %v", err) log.Errorf("Failed to open stream: %v", err)
localConn.Close() localConn.Close()
if singleplex { if sesh.Singleplex {
sesh.Close() sesh.Close()
} }
return return
@ -140,6 +112,7 @@ func RouteTCP(listener net.Listener, streamTimeout time.Duration, singleplex boo
return return
} }
stream.SetReadFromTimeout(streamTimeout) // if localConn hasn't sent anything to stream to a period of time, stream closes
go func() { go func() {
if _, err := common.Copy(localConn, stream); err != nil { if _, err := common.Copy(localConn, stream); err != nil {
log.Tracef("copying stream to proxy client: %v", err) log.Tracef("copying stream to proxy client: %v", err)
@ -148,6 +121,7 @@ func RouteTCP(listener net.Listener, streamTimeout time.Duration, singleplex boo
if _, err = common.Copy(stream, localConn); err != nil { if _, err = common.Copy(stream, localConn); err != nil {
log.Tracef("copying proxy client to stream: %v", err) log.Tracef("copying proxy client to stream: %v", err)
} }
}(sesh, localConn, streamTimeout) }(sesh, localConn)
} }
} }

View File

@ -4,14 +4,13 @@ import (
"crypto" "crypto"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus"
"io/ioutil" "io/ioutil"
"net" "net"
"strings" "strings"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
) )
@ -27,33 +26,30 @@ type RawConfig struct {
UID []byte UID []byte
PublicKey []byte PublicKey []byte
NumConn int NumConn int
LocalHost string // jsonOptional LocalHost string // jsonOptional
LocalPort string // jsonOptional LocalPort string // jsonOptional
RemoteHost string // jsonOptional RemoteHost string // jsonOptional
RemotePort string // jsonOptional RemotePort string // jsonOptional
AlternativeNames []string // jsonOptional
// defaults set in ProcessRawConfig // defaults set in ProcessRawConfig
UDP bool // nullable UDP bool // nullable
BrowserSig string // nullable BrowserSig string // nullable
Transport string // nullable Transport string // nullable
CDNOriginHost string // nullable
CDNWsUrlPath string // nullable
StreamTimeout int // nullable StreamTimeout int // nullable
KeepAlive int // nullable KeepAlive int // nullable
} }
type RemoteConnConfig struct { type RemoteConnConfig struct {
Singleplex bool Singleplex bool
NumConn int NumConn int
KeepAlive time.Duration KeepAlive time.Duration
RemoteAddr string RemoteAddr string
Transport TransportConfig TransportMaker func() Transport
} }
type LocalConnConfig struct { type LocalConnConfig struct {
LocalAddr string LocalAddr string
Timeout time.Duration Timeout time.Duration
MockDomainList []string
} }
type AuthInfo struct { type AuthInfo struct {
@ -97,20 +93,6 @@ func ssvToJson(ssv string) (ret []byte) {
} }
key := sp[0] key := sp[0]
value := sp[1] value := sp[1]
if strings.HasPrefix(key, "AlternativeNames") {
switch strings.Contains(value, ",") {
case true:
domains := strings.Split(value, ",")
for index, domain := range domains {
domains[index] = `"` + domain + `"`
}
value = strings.Join(domains, ",")
ret = append(ret, []byte(`"`+key+`":[`+value+`],`)...)
case false:
ret = append(ret, []byte(`"`+key+`":["`+value+`"],`)...)
}
continue
}
// JSON doesn't like quotation marks around int and bool // JSON doesn't like quotation marks around int and bool
// This is extremely ugly but it's still better than writing a tokeniser // This is extremely ugly but it's still better than writing a tokeniser
if elem(key, unquoted) { if elem(key, unquoted) {
@ -156,17 +138,6 @@ func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local Loca
return nullErr("ServerName") return nullErr("ServerName")
} }
auth.MockDomain = raw.ServerName auth.MockDomain = raw.ServerName
var filteredAlternativeNames []string
for _, alternativeName := range raw.AlternativeNames {
if len(alternativeName) > 0 {
filteredAlternativeNames = append(filteredAlternativeNames, alternativeName)
}
}
raw.AlternativeNames = filteredAlternativeNames
local.MockDomainList = raw.AlternativeNames
local.MockDomainList = append(local.MockDomainList, auth.MockDomain)
if raw.ProxyMethod == "" { if raw.ProxyMethod == "" {
return nullErr("ServerName") return nullErr("ServerName")
} }
@ -190,13 +161,11 @@ func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local Loca
// Encryption method // Encryption method
switch strings.ToLower(raw.EncryptionMethod) { switch strings.ToLower(raw.EncryptionMethod) {
case "plain": case "plain":
auth.EncryptionMethod = mux.EncryptionMethodPlain auth.EncryptionMethod = mux.E_METHOD_PLAIN
case "aes-gcm", "aes-256-gcm": case "aes-gcm":
auth.EncryptionMethod = mux.EncryptionMethodAES256GCM auth.EncryptionMethod = mux.E_METHOD_AES_GCM
case "aes-128-gcm":
auth.EncryptionMethod = mux.EncryptionMethodAES128GCM
case "chacha20-poly1305": case "chacha20-poly1305":
auth.EncryptionMethod = mux.EncryptionMethodChaha20Poly1305 auth.EncryptionMethod = mux.E_METHOD_CHACHA20_POLY1305
default: default:
err = fmt.Errorf("unknown encryption method %v", raw.EncryptionMethod) err = fmt.Errorf("unknown encryption method %v", raw.EncryptionMethod)
return return
@ -220,19 +189,10 @@ func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local Loca
// Transport and (if TLS mode), browser // Transport and (if TLS mode), browser
switch strings.ToLower(raw.Transport) { switch strings.ToLower(raw.Transport) {
case "cdn": case "cdn":
var cdnDomainPort string remote.TransportMaker = func() Transport {
if raw.CDNOriginHost == "" { return &WSOverTLS{
cdnDomainPort = net.JoinHostPort(raw.RemoteHost, raw.RemotePort) cdnDomainPort: remote.RemoteAddr,
} else { }
cdnDomainPort = net.JoinHostPort(raw.CDNOriginHost, raw.RemotePort)
}
if raw.CDNWsUrlPath == "" {
raw.CDNWsUrlPath = "/"
}
remote.Transport = TransportConfig{
mode: "cdn",
wsUrl: "ws://" + cdnDomainPort + raw.CDNWsUrlPath,
} }
case "direct": case "direct":
fallthrough fallthrough
@ -240,17 +200,16 @@ func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local Loca
var browser browser var browser browser
switch strings.ToLower(raw.BrowserSig) { switch strings.ToLower(raw.BrowserSig) {
case "firefox": case "firefox":
browser = firefox browser = &Firefox{}
case "safari":
browser = safari
case "chrome": case "chrome":
fallthrough fallthrough
default: default:
browser = chrome browser = &Chrome{}
} }
remote.Transport = TransportConfig{ remote.TransportMaker = func() Transport {
mode: "direct", return &DirectTLS{
browser: browser, browser: browser,
}
} }
} }

View File

@ -1,10 +1,9 @@
package client package client
import ( import (
"github.com/stretchr/testify/assert"
"io/ioutil" "io/ioutil"
"testing" "testing"
"github.com/stretchr/testify/assert"
) )
func TestParseConfig(t *testing.T) { func TestParseConfig(t *testing.T) {

View File

@ -8,26 +8,3 @@ type Transport interface {
Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error)
net.Conn net.Conn
} }
type TransportConfig struct {
mode string
wsUrl string
browser browser
}
func (t TransportConfig) CreateTransport() Transport {
switch t.mode {
case "cdn":
return &WSOverTLS{
wsUrl: t.wsUrl,
}
case "direct":
return &DirectTLS{
browser: t.browser,
}
default:
return nil
}
}

View File

@ -4,18 +4,17 @@ import (
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
"net"
"net/http"
"net/url"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/gorilla/websocket" "github.com/gorilla/websocket"
utls "github.com/refraction-networking/utls" utls "github.com/refraction-networking/utls"
"net"
"net/http"
"net/url"
) )
type WSOverTLS struct { type WSOverTLS struct {
*common.WebSocketConn *common.WebSocketConn
wsUrl string cdnDomainPort string
} }
func (ws *WSOverTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) { func (ws *WSOverTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) {
@ -24,24 +23,12 @@ func (ws *WSOverTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey
InsecureSkipVerify: true, InsecureSkipVerify: true,
} }
uconn := utls.UClient(rawConn, utlsConfig, utls.HelloChrome_Auto) uconn := utls.UClient(rawConn, utlsConfig, utls.HelloChrome_Auto)
err = uconn.BuildHandshakeState()
if err != nil {
return
}
for i, extension := range uconn.Extensions {
_, ok := extension.(*utls.ALPNExtension)
if ok {
uconn.Extensions = append(uconn.Extensions[:i], uconn.Extensions[i+1:]...)
break
}
}
err = uconn.Handshake() err = uconn.Handshake()
if err != nil { if err != nil {
return return
} }
u, err := url.Parse(ws.wsUrl) u, err := url.Parse("ws://" + ws.cdnDomainPort)
if err != nil { if err != nil {
return sessionKey, fmt.Errorf("failed to parse ws url: %v", err) return sessionKey, fmt.Errorf("failed to parse ws url: %v", err)
} }

View File

@ -6,7 +6,6 @@ import (
"crypto/rand" "crypto/rand"
"errors" "errors"
"io" "io"
"math/big"
"time" "time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -53,8 +52,8 @@ func CryptoRandRead(buf []byte) {
RandRead(rand.Reader, buf) RandRead(rand.Reader, buf)
} }
func backoff(f func() error) { func RandRead(randSource io.Reader, buf []byte) {
err := f() _, err := randSource.Read(buf)
if err == nil { if err == nil {
return return
} }
@ -62,36 +61,12 @@ func backoff(f func() error) {
100 * time.Millisecond, 300 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 100 * time.Millisecond, 300 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second,
3 * time.Second, 5 * time.Second} 3 * time.Second, 5 * time.Second}
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
log.Errorf("Failed to get random: %v. Retrying...", err) log.Errorf("Failed to get random bytes: %v. Retrying...", err)
err = f() _, err = randSource.Read(buf)
if err == nil { if err == nil {
return return
} }
time.Sleep(waitDur[i]) time.Sleep(waitDur[i])
} }
log.Fatal("Cannot get random after 10 retries") log.Fatal("Cannot get random bytes after 10 retries")
}
func RandRead(randSource io.Reader, buf []byte) {
backoff(func() error {
_, err := randSource.Read(buf)
return err
})
}
func RandItem[T any](list []T) T {
return list[RandInt(len(list))]
}
func RandInt(n int) int {
s := new(int)
backoff(func() error {
size, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
if err != nil {
return err
}
*s = int(size.Int64())
return nil
})
return *s
} }

View File

@ -4,11 +4,10 @@ import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"errors" "errors"
"github.com/stretchr/testify/assert"
"io" "io"
"math/rand" "math/rand"
"testing" "testing"
"github.com/stretchr/testify/assert"
) )
const gcmTagSize = 16 const gcmTagSize = 16

View File

@ -2,7 +2,6 @@ package common
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"io" "io"
"net" "net"
"sync" "sync"
@ -37,17 +36,18 @@ func AddRecordLayer(input []byte, typ byte, ver uint16) []byte {
type TLSConn struct { type TLSConn struct {
net.Conn net.Conn
writeBufPool sync.Pool writeM sync.Mutex
writeBuf []byte
} }
func NewTLSConn(conn net.Conn) *TLSConn { func NewTLSConn(conn net.Conn) *TLSConn {
writeBuf := make([]byte, initialWriteBufSize)
writeBuf[0] = ApplicationData
writeBuf[1] = byte(VersionTLS13 >> 8)
writeBuf[2] = byte(VersionTLS13 & 0xFF)
return &TLSConn{ return &TLSConn{
Conn: conn, Conn: conn,
writeBufPool: sync.Pool{New: func() interface{} { writeBuf: writeBuf,
b := make([]byte, 0, initialWriteBufSize)
b = append(b, ApplicationData, byte(VersionTLS13>>8), byte(VersionTLS13&0xFF))
return &b
}},
} }
} }
@ -95,15 +95,12 @@ func (tls *TLSConn) Read(buffer []byte) (n int, err error) {
func (tls *TLSConn) Write(in []byte) (n int, err error) { func (tls *TLSConn) Write(in []byte) (n int, err error) {
msgLen := len(in) msgLen := len(in)
if msgLen > 1<<14+256 { // https://tools.ietf.org/html/rfc8446#section-5.2 tls.writeM.Lock()
return 0, errors.New("message is too long") tls.writeBuf = append(tls.writeBuf[:5], in...)
} tls.writeBuf[3] = byte(msgLen >> 8)
writeBuf := tls.writeBufPool.Get().(*[]byte) tls.writeBuf[4] = byte(msgLen & 0xFF)
*writeBuf = append(*writeBuf, byte(msgLen>>8), byte(msgLen&0xFF)) n, err = tls.Conn.Write(tls.writeBuf[:recordLayerLength+msgLen])
*writeBuf = append(*writeBuf, in...) tls.writeM.Unlock()
n, err = tls.Conn.Write(*writeBuf)
*writeBuf = (*writeBuf)[:3]
tls.writeBufPool.Put(writeBuf)
return n - recordLayerLength, err return n - recordLayerLength, err
} }

View File

@ -2,11 +2,10 @@ package common
import ( import (
"errors" "errors"
"github.com/gorilla/websocket"
"io" "io"
"sync" "sync"
"time" "time"
"github.com/gorilla/websocket"
) )
// WebSocketConn implements io.ReadWriteCloser // WebSocketConn implements io.ReadWriteCloser

View File

@ -68,11 +68,13 @@ func Unmarshal(data []byte) (crypto.PublicKey, bool) {
return &pub, true return &pub, true
} }
func GenerateSharedSecret(privKey crypto.PrivateKey, pubKey crypto.PublicKey) ([]byte, error) { func GenerateSharedSecret(privKey crypto.PrivateKey, pubKey crypto.PublicKey) []byte {
var priv, pub *[32]byte var priv, pub, secret *[32]byte
priv = privKey.(*[32]byte) priv = privKey.(*[32]byte)
pub = pubKey.(*[32]byte) pub = pubKey.(*[32]byte)
secret = new([32]byte)
return curve25519.X25519(priv[:], pub[:]) curve25519.ScalarMult(secret, priv, pub)
return secret[:]
} }

View File

@ -90,11 +90,11 @@ func testECDH(t testing.TB) {
t.Fatalf("Unmarshal does not work") t.Fatalf("Unmarshal does not work")
} }
secret1, err = GenerateSharedSecret(privKey1, pubKey2) secret1 = GenerateSharedSecret(privKey1, pubKey2)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
secret2, err = GenerateSharedSecret(privKey2, pubKey1) secret2 = GenerateSharedSecret(privKey2, pubKey1)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }

View File

@ -13,20 +13,18 @@ import (
// instead of byte-oriented. The integrity of datagrams written into this buffer is preserved. // instead of byte-oriented. The integrity of datagrams written into this buffer is preserved.
// it won't get chopped up into individual bytes // it won't get chopped up into individual bytes
type datagramBufferedPipe struct { type datagramBufferedPipe struct {
pLens []int pLens []int
// lazily allocated
buf *bytes.Buffer buf *bytes.Buffer
closed bool closed bool
rwCond *sync.Cond rwCond *sync.Cond
wtTimeout time.Duration wtTimeout time.Duration
rDeadline time.Time rDeadline time.Time
timeoutTimer *time.Timer
} }
func NewDatagramBufferedPipe() *datagramBufferedPipe { func NewDatagramBufferedPipe() *datagramBufferedPipe {
d := &datagramBufferedPipe{ d := &datagramBufferedPipe{
rwCond: sync.NewCond(&sync.Mutex{}), rwCond: sync.NewCond(&sync.Mutex{}),
buf: new(bytes.Buffer),
} }
return d return d
} }
@ -34,25 +32,25 @@ func NewDatagramBufferedPipe() *datagramBufferedPipe {
func (d *datagramBufferedPipe) Read(target []byte) (int, error) { func (d *datagramBufferedPipe) Read(target []byte) (int, error) {
d.rwCond.L.Lock() d.rwCond.L.Lock()
defer d.rwCond.L.Unlock() defer d.rwCond.L.Unlock()
if d.buf == nil {
d.buf = new(bytes.Buffer)
}
for { for {
if d.closed && len(d.pLens) == 0 { if d.closed && len(d.pLens) == 0 {
return 0, io.EOF return 0, io.EOF
} }
hasRDeadline := !d.rDeadline.IsZero() if !d.rDeadline.IsZero() {
if hasRDeadline { delta := time.Until(d.rDeadline)
if time.Until(d.rDeadline) <= 0 { if delta <= 0 {
return 0, ErrTimeout return 0, ErrTimeout
} }
time.AfterFunc(delta, d.rwCond.Broadcast)
} }
if len(d.pLens) > 0 { if len(d.pLens) > 0 {
break break
} }
if hasRDeadline {
d.broadcastAfter(time.Until(d.rDeadline))
}
d.rwCond.Wait() d.rwCond.Wait()
} }
dataLen := d.pLens[0] dataLen := d.pLens[0]
@ -66,9 +64,53 @@ func (d *datagramBufferedPipe) Read(target []byte) (int, error) {
return dataLen, nil return dataLen, nil
} }
func (d *datagramBufferedPipe) Write(f *Frame) (toBeClosed bool, err error) { func (d *datagramBufferedPipe) WriteTo(w io.Writer) (n int64, err error) {
d.rwCond.L.Lock() d.rwCond.L.Lock()
defer d.rwCond.L.Unlock() defer d.rwCond.L.Unlock()
if d.buf == nil {
d.buf = new(bytes.Buffer)
}
for {
if d.closed && len(d.pLens) == 0 {
return 0, io.EOF
}
if !d.rDeadline.IsZero() {
delta := time.Until(d.rDeadline)
if delta <= 0 {
return 0, ErrTimeout
}
if d.wtTimeout == 0 {
// if there hasn't been a scheduled broadcast
time.AfterFunc(delta, d.rwCond.Broadcast)
}
}
if d.wtTimeout != 0 {
d.rDeadline = time.Now().Add(d.wtTimeout)
time.AfterFunc(d.wtTimeout, d.rwCond.Broadcast)
}
if len(d.pLens) > 0 {
var dataLen int
dataLen, d.pLens = d.pLens[0], d.pLens[1:]
written, er := w.Write(d.buf.Next(dataLen))
n += int64(written)
if er != nil {
d.rwCond.Broadcast()
return n, er
}
d.rwCond.Broadcast()
} else {
d.rwCond.Wait()
}
}
}
func (d *datagramBufferedPipe) Write(f Frame) (toBeClosed bool, err error) {
d.rwCond.L.Lock()
defer d.rwCond.L.Unlock()
if d.buf == nil {
d.buf = new(bytes.Buffer)
}
for { for {
if d.closed { if d.closed {
return true, io.ErrClosedPipe return true, io.ErrClosedPipe
@ -80,7 +122,7 @@ func (d *datagramBufferedPipe) Write(f *Frame) (toBeClosed bool, err error) {
d.rwCond.Wait() d.rwCond.Wait()
} }
if f.Closing != closingNothing { if f.Closing != C_NOOP {
d.closed = true d.closed = true
d.rwCond.Broadcast() d.rwCond.Broadcast()
return true, nil return true, nil
@ -111,9 +153,10 @@ func (d *datagramBufferedPipe) SetReadDeadline(t time.Time) {
d.rwCond.Broadcast() d.rwCond.Broadcast()
} }
func (d *datagramBufferedPipe) broadcastAfter(t time.Duration) { func (d *datagramBufferedPipe) SetWriteToTimeout(t time.Duration) {
if d.timeoutTimer != nil { d.rwCond.L.Lock()
d.timeoutTimer.Stop() defer d.rwCond.L.Unlock()
}
d.timeoutTimer = time.AfterFunc(t, d.rwCond.Broadcast) d.wtTimeout = t
d.rwCond.Broadcast()
} }

View File

@ -1,37 +1,74 @@
package multiplex package multiplex
import ( import (
"bytes"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
) )
func TestDatagramBuffer_RW(t *testing.T) { func TestDatagramBuffer_RW(t *testing.T) {
b := []byte{0x01, 0x02, 0x03} b := []byte{0x01, 0x02, 0x03}
t.Run("simple write", func(t *testing.T) { t.Run("simple write", func(t *testing.T) {
pipe := NewDatagramBufferedPipe() pipe := NewDatagramBufferedPipe()
_, err := pipe.Write(&Frame{Payload: b}) _, err := pipe.Write(Frame{Payload: b})
assert.NoError(t, err) if err != nil {
t.Error(
"expecting", "nil error",
"got", err,
)
return
}
}) })
t.Run("simple read", func(t *testing.T) { t.Run("simple read", func(t *testing.T) {
pipe := NewDatagramBufferedPipe() pipe := NewDatagramBufferedPipe()
_, _ = pipe.Write(&Frame{Payload: b}) _, _ = pipe.Write(Frame{Payload: b})
b2 := make([]byte, len(b)) b2 := make([]byte, len(b))
n, err := pipe.Read(b2) n, err := pipe.Read(b2)
assert.NoError(t, err) if n != len(b) {
assert.Equal(t, len(b), n) t.Error(
assert.Equal(t, b, b2) "For", "number of bytes read",
assert.Equal(t, 0, pipe.buf.Len(), "buf len is not 0 after finished reading") "expecting", len(b),
"got", n,
)
return
}
if err != nil {
t.Error(
"expecting", "nil error",
"got", err,
)
return
}
if !bytes.Equal(b, b2) {
t.Error(
"expecting", b,
"got", b2,
)
}
if pipe.buf.Len() != 0 {
t.Error("buf len is not 0 after finished reading")
return
}
}) })
t.Run("writing closing frame", func(t *testing.T) { t.Run("writing closing frame", func(t *testing.T) {
pipe := NewDatagramBufferedPipe() pipe := NewDatagramBufferedPipe()
toBeClosed, err := pipe.Write(&Frame{Closing: closingStream}) toBeClosed, err := pipe.Write(Frame{Closing: C_STREAM})
assert.NoError(t, err) if !toBeClosed {
assert.True(t, toBeClosed, "should be to be closed") t.Error("should be to be closed")
assert.True(t, pipe.closed, "pipe should be closed") }
if err != nil {
t.Error(
"expecting", "nil error",
"got", err,
)
return
}
if !pipe.closed {
t.Error("expecting closed pipe, not closed")
}
}) })
} }
@ -39,24 +76,65 @@ func TestDatagramBuffer_BlockingRead(t *testing.T) {
pipe := NewDatagramBufferedPipe() pipe := NewDatagramBufferedPipe()
b := []byte{0x01, 0x02, 0x03} b := []byte{0x01, 0x02, 0x03}
go func() { go func() {
time.Sleep(readBlockTime) time.Sleep(100 * time.Millisecond)
pipe.Write(&Frame{Payload: b}) pipe.Write(Frame{Payload: b})
}() }()
b2 := make([]byte, len(b)) b2 := make([]byte, len(b))
n, err := pipe.Read(b2) n, err := pipe.Read(b2)
assert.NoError(t, err) if n != len(b) {
assert.Equal(t, len(b), n, "number of bytes read after block is wrong") t.Error(
assert.Equal(t, b, b2) "For", "number of bytes read after block",
"expecting", len(b),
"got", n,
)
return
}
if err != nil {
t.Error(
"For", "blocked read",
"expecting", "nil error",
"got", err,
)
return
}
if !bytes.Equal(b, b2) {
t.Error(
"For", "blocked read",
"expecting", b,
"got", b2,
)
return
}
} }
func TestDatagramBuffer_CloseThenRead(t *testing.T) { func TestDatagramBuffer_CloseThenRead(t *testing.T) {
pipe := NewDatagramBufferedPipe() pipe := NewDatagramBufferedPipe()
b := []byte{0x01, 0x02, 0x03} b := []byte{0x01, 0x02, 0x03}
pipe.Write(&Frame{Payload: b}) pipe.Write(Frame{Payload: b})
b2 := make([]byte, len(b)) b2 := make([]byte, len(b))
pipe.Close() pipe.Close()
n, err := pipe.Read(b2) n, err := pipe.Read(b2)
assert.NoError(t, err) if n != len(b) {
assert.Equal(t, len(b), n, "number of bytes read after block is wrong") t.Error(
assert.Equal(t, b, b2) "For", "number of bytes read",
"expecting", len(b),
"got", n,
)
}
if err != nil {
t.Error(
"For", "simple read",
"expecting", "nil error",
"got", err,
)
return
}
if !bytes.Equal(b, b2) {
t.Error(
"For", "simple read",
"expecting", b,
"got", b2,
)
return
}
} }

View File

@ -1,9 +1,9 @@
package multiplex package multiplex
const ( const (
closingNothing = iota C_NOOP = iota
closingStream C_STREAM
closingSession C_SESSION
) )
type Frame struct { type Frame struct {

View File

@ -2,15 +2,14 @@ package multiplex
import ( import (
"bytes" "bytes"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/connutil"
"io" "io"
"math/rand" "math/rand"
"net" "net"
"sync" "sync"
"testing" "testing"
"time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
) )
func serveEcho(l net.Listener) { func serveEcho(l net.Listener) {
@ -20,13 +19,13 @@ func serveEcho(l net.Listener) {
// TODO: pass the error back // TODO: pass the error back
return return
} }
go func(conn net.Conn) { go func() {
_, err := io.Copy(conn, conn) _, err := io.Copy(conn, conn)
if err != nil { if err != nil {
// TODO: pass the error back // TODO: pass the error back
return return
} }
}(conn) }()
} }
} }
@ -38,7 +37,7 @@ type connPair struct {
func makeSessionPair(numConn int) (*Session, *Session, []*connPair) { func makeSessionPair(numConn int) (*Session, *Session, []*connPair) {
sessionKey := [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} sessionKey := [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
sessionId := 1 sessionId := 1
obfuscator, _ := MakeObfuscator(EncryptionMethodChaha20Poly1305, sessionKey) obfuscator, _ := MakeObfuscator(E_METHOD_CHACHA20_POLY1305, sessionKey)
clientConfig := SessionConfig{ clientConfig := SessionConfig{
Obfuscator: obfuscator, Obfuscator: obfuscator,
Valve: nil, Valve: nil,
@ -64,36 +63,31 @@ func makeSessionPair(numConn int) (*Session, *Session, []*connPair) {
return clientSession, serverSession, paris return clientSession, serverSession, paris
} }
func runEchoTest(t *testing.T, conns []net.Conn, msgLen int) { func runEchoTest(t *testing.T, streams []*Stream) {
const testDataLen = 16384
var wg sync.WaitGroup var wg sync.WaitGroup
for _, stream := range streams {
for _, conn := range conns {
wg.Add(1) wg.Add(1)
go func(conn net.Conn) { go func(stream *Stream) {
defer wg.Done() testData := make([]byte, testDataLen)
testData := make([]byte, msgLen)
rand.Read(testData) rand.Read(testData)
// we cannot call t.Fatalf in concurrent contexts n, err := stream.Write(testData)
n, err := conn.Write(testData) if n != testDataLen {
if n != msgLen { t.Fatalf("written only %v, err %v", n, err)
t.Errorf("written only %v, err %v", n, err)
return
} }
recvBuf := make([]byte, msgLen) recvBuf := make([]byte, testDataLen)
_, err = io.ReadFull(conn, recvBuf) _, err = io.ReadFull(stream, recvBuf)
if err != nil { if err != nil {
t.Errorf("failed to read back: %v", err) t.Fatalf("failed to read back: %v", err)
return
} }
if !bytes.Equal(testData, recvBuf) { if !bytes.Equal(testData, recvBuf) {
t.Errorf("echoed data not correct") t.Fatalf("echoed data not correct")
return
} }
}(conn) wg.Done()
}(stream)
} }
wg.Wait() wg.Wait()
} }
@ -101,32 +95,43 @@ func runEchoTest(t *testing.T, conns []net.Conn, msgLen int) {
func TestMultiplex(t *testing.T) { func TestMultiplex(t *testing.T) {
const numStreams = 2000 // -race option limits the number of goroutines to 8192 const numStreams = 2000 // -race option limits the number of goroutines to 8192
const numConns = 4 const numConns = 4
const msgLen = 16384
clientSession, serverSession, _ := makeSessionPair(numConns) clientSession, serverSession, _ := makeSessionPair(numConns)
go serveEcho(serverSession) go serveEcho(serverSession)
streams := make([]net.Conn, numStreams) streams := make([]*Stream, numStreams)
for i := 0; i < numStreams; i++ { for i := 0; i < numStreams; i++ {
stream, err := clientSession.OpenStream() stream, err := clientSession.OpenStream()
assert.NoError(t, err) if err != nil {
t.Fatalf("failed to open stream: %v", err)
}
streams[i] = stream streams[i] = stream
} }
//test echo //test echo
runEchoTest(t, streams, msgLen) runEchoTest(t, streams)
if clientSession.streamCount() != numStreams {
assert.EqualValues(t, numStreams, clientSession.streamCount(), "client stream count is wrong") t.Errorf("client stream count is wrong: %v", clientSession.streamCount())
assert.EqualValues(t, numStreams, serverSession.streamCount(), "server stream count is wrong") }
if serverSession.streamCount() != numStreams {
t.Errorf("server stream count is wrong: %v", serverSession.streamCount())
}
// close one stream // close one stream
closing, streams := streams[0], streams[1:] closing, streams := streams[0], streams[1:]
err := closing.Close() err := closing.Close()
assert.NoError(t, err, "couldn't close a stream") if err != nil {
t.Errorf("couldn't close a stream")
}
_, err = closing.Write([]byte{0}) _, err = closing.Write([]byte{0})
assert.Equal(t, ErrBrokenStream, err) if err != ErrBrokenStream {
t.Errorf("expecting error %v, got %v", ErrBrokenStream, err)
}
_, err = closing.Read(make([]byte, 1)) _, err = closing.Read(make([]byte, 1))
assert.Equal(t, ErrBrokenStream, err) if err != ErrBrokenStream {
t.Errorf("expecting error %v, got %v", ErrBrokenStream, err)
}
} }
func TestMux_StreamClosing(t *testing.T) { func TestMux_StreamClosing(t *testing.T) {
@ -138,13 +143,17 @@ func TestMux_StreamClosing(t *testing.T) {
recvBuf := make([]byte, 128) recvBuf := make([]byte, 128)
toBeClosed, _ := clientSession.OpenStream() toBeClosed, _ := clientSession.OpenStream()
_, err := toBeClosed.Write(testData) // should be echoed back _, err := toBeClosed.Write(testData) // should be echoed back
assert.NoError(t, err, "couldn't write to a stream") if err != nil {
t.Errorf("can't write to stream: %v", err)
_, err = io.ReadFull(toBeClosed, recvBuf[:1]) }
assert.NoError(t, err, "can't read anything before stream closed")
time.Sleep(500 * time.Millisecond)
_ = toBeClosed.Close() _ = toBeClosed.Close()
_, err = io.ReadFull(toBeClosed, recvBuf[1:]) _, err = io.ReadFull(toBeClosed, recvBuf)
assert.NoError(t, err, "can't read residual data on stream") if err != nil {
assert.Equal(t, testData, recvBuf, "incorrect data read back") t.Errorf("can't read residual data on stream: %v", err)
}
if !bytes.Equal(testData, recvBuf) {
t.Errorf("incorrect data read back")
}
} }

View File

@ -3,7 +3,6 @@ package multiplex
import ( import (
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/rand"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -12,190 +11,187 @@ import (
"golang.org/x/crypto/salsa20" "golang.org/x/crypto/salsa20"
) )
const frameHeaderLength = 14 type Obfser func(*Frame, []byte, int) (int, error)
type Deobfser func([]byte) (*Frame, error)
var u32 = binary.BigEndian.Uint32
var u64 = binary.BigEndian.Uint64
var putU32 = binary.BigEndian.PutUint32
var putU64 = binary.BigEndian.PutUint64
const HEADER_LEN = 14
const salsa20NonceSize = 8 const salsa20NonceSize = 8
// maxExtraLen equals the max length of padding + AEAD tag.
// It is 255 bytes because the extra len field in frame header is only one byte.
const maxExtraLen = 1<<8 - 1
// padFirstNFrames specifies the number of initial frames to pad,
// to avoid TLS-in-TLS detection
const padFirstNFrames = 5
const ( const (
EncryptionMethodPlain = iota E_METHOD_PLAIN = iota
EncryptionMethodAES256GCM E_METHOD_AES_GCM
EncryptionMethodChaha20Poly1305 E_METHOD_CHACHA20_POLY1305
EncryptionMethodAES128GCM
) )
// Obfuscator is responsible for serialisation, obfuscation, and optional encryption of data frames. // Obfuscator is responsible for the obfuscation and deobfuscation of frames
type Obfuscator struct { type Obfuscator struct {
payloadCipher cipher.AEAD // Used in Stream.Write. Add multiplexing headers, encrypt and add TLS header
Obfs Obfser
// Remove TLS header, decrypt and unmarshall frames
Deobfs Deobfser
SessionKey [32]byte
sessionKey [32]byte maxOverhead int
} }
// obfuscate adds multiplexing headers, encrypt and add TLS header // MakeObfs returns a function of type Obfser. An Obfser takes three arguments:
func (o *Obfuscator) obfuscate(f *Frame, buf []byte, payloadOffsetInBuf int) (int, error) { // a *Frame with all the field set correctly, a []byte as buffer to put encrypted
// The method here is to use the first payloadCipher.NonceSize() bytes of the serialised frame header // message in, and an int called payloadOffsetInBuf to be used when *Frame.payload
// as iv/nonce for the AEAD cipher to encrypt the frame payload. Then we use // is in the byte slice used as buffer (2nd argument). payloadOffsetInBuf specifies
// the authentication tag produced appended to the end of the ciphertext (of size payloadCipher.Overhead()) // the index at which data belonging to *Frame.Payload starts in the buffer.
// as nonce for Salsa20 to encrypt the frame header. Both with sessionKey as keys. func MakeObfs(salsaKey [32]byte, payloadCipher cipher.AEAD) Obfser {
// obfs := func(f *Frame, buf []byte, payloadOffsetInBuf int) (int, error) {
// Several cryptographic guarantees we have made here: that payloadCipher, as an AEAD, is given a unique // we need the encrypted data to be at least 8 bytes to be used as nonce for salsa20 stream header encryption
// iv/nonce each time, relative to its key; that the frame header encryptor Salsa20 is given a unique // this will be the case if the encryption method is an AEAD cipher, however for plain, it's well possible
// nonce each time, relative to its key; and that the authenticity of frame header is checked. // that the frame payload is smaller than 8 bytes, so we need to add on the difference
// payloadLen := len(f.Payload)
// The payloadCipher is given a unique iv/nonce each time because it is derived from the frame header, which if payloadLen == 0 {
// contains the monotonically increasing stream id (uint32) and frame sequence (uint64). There will be a nonce return 0, errors.New("payload cannot be empty")
// reuse after 2^64-1 frames sent (sent, not received because frames going different ways are sequenced }
// independently) by a stream, or after 2^32-1 streams created in a single session. We consider these number var extraLen int
// to be large enough that they may never happen in reasonable time frames. Of course, different sessions if payloadCipher == nil {
// will produce the same combination of stream id and frame sequence, but they will have different session keys. extraLen = salsa20NonceSize - payloadLen
// if extraLen < 0 {
// // if our payload is already greater than 8 bytes
// Because the frame header, before it being encrypted, is fed into the AEAD, it is also authenticated. extraLen = 0
// (rfc5116 s.2.1 "The nonce is authenticated internally to the algorithm"). }
//
// In case the user chooses to not encrypt the frame payload, payloadCipher will be nil. In this scenario,
// we generate random bytes to be used as salsa20 nonce.
payloadLen := len(f.Payload)
if payloadLen == 0 {
return 0, errors.New("payload cannot be empty")
}
tagLen := 0
if o.payloadCipher != nil {
tagLen = o.payloadCipher.Overhead()
} else {
tagLen = salsa20NonceSize
}
// Pad to avoid size side channel leak
padLen := 0
if f.Seq < padFirstNFrames {
padLen = common.RandInt(maxExtraLen - tagLen + 1)
}
usefulLen := frameHeaderLength + payloadLen + padLen + tagLen
if len(buf) < usefulLen {
return 0, errors.New("obfs buffer too small")
}
// we do as much in-place as possible to save allocation
payload := buf[frameHeaderLength : frameHeaderLength+payloadLen+padLen]
if payloadOffsetInBuf != frameHeaderLength {
// if payload is not at the correct location in buffer
copy(payload, f.Payload)
}
header := buf[:frameHeaderLength]
binary.BigEndian.PutUint32(header[0:4], f.StreamID)
binary.BigEndian.PutUint64(header[4:12], f.Seq)
header[12] = f.Closing
header[13] = byte(padLen + tagLen)
// Random bytes for padding and nonce
_, err := rand.Read(buf[frameHeaderLength+payloadLen : usefulLen])
if err != nil {
return 0, fmt.Errorf("failed to pad random: %w", err)
}
if o.payloadCipher != nil {
o.payloadCipher.Seal(payload[:0], header[:o.payloadCipher.NonceSize()], payload, nil)
}
nonce := buf[usefulLen-salsa20NonceSize : usefulLen]
salsa20.XORKeyStream(header, header, nonce, &o.sessionKey)
return usefulLen, nil
}
// deobfuscate removes TLS header, decrypt and unmarshall frames
func (o *Obfuscator) deobfuscate(f *Frame, in []byte) error {
if len(in) < frameHeaderLength+salsa20NonceSize {
return fmt.Errorf("input size %v, but it cannot be shorter than %v bytes", len(in), frameHeaderLength+salsa20NonceSize)
}
header := in[:frameHeaderLength]
pldWithOverHead := in[frameHeaderLength:] // payload + potential overhead
nonce := in[len(in)-salsa20NonceSize:]
salsa20.XORKeyStream(header, header, nonce, &o.sessionKey)
streamID := binary.BigEndian.Uint32(header[0:4])
seq := binary.BigEndian.Uint64(header[4:12])
closing := header[12]
extraLen := header[13]
usefulPayloadLen := len(pldWithOverHead) - int(extraLen)
if usefulPayloadLen < 0 || usefulPayloadLen > len(pldWithOverHead) {
return errors.New("extra length is negative or extra length is greater than total pldWithOverHead length")
}
var outputPayload []byte
if o.payloadCipher == nil {
if extraLen == 0 {
outputPayload = pldWithOverHead
} else { } else {
extraLen = payloadCipher.Overhead()
if extraLen < salsa20NonceSize {
return 0, errors.New("AEAD's Overhead cannot be fewer than 8 bytes")
}
}
usefulLen := HEADER_LEN + payloadLen + extraLen
if len(buf) < usefulLen {
return 0, errors.New("obfs buffer too small")
}
// we do as much in-place as possible to save allocation
payload := buf[HEADER_LEN : HEADER_LEN+payloadLen]
if payloadOffsetInBuf != HEADER_LEN {
// if payload is not at the correct location in buffer
copy(payload, f.Payload)
}
header := buf[:HEADER_LEN]
putU32(header[0:4], f.StreamID)
putU64(header[4:12], f.Seq)
header[12] = f.Closing
header[13] = byte(extraLen)
if payloadCipher == nil {
if extraLen != 0 { // read nonce
extra := buf[usefulLen-extraLen : usefulLen]
common.CryptoRandRead(extra)
}
} else {
payloadCipher.Seal(payload[:0], header[:payloadCipher.NonceSize()], payload, nil)
}
nonce := buf[usefulLen-salsa20NonceSize : usefulLen]
salsa20.XORKeyStream(header, header, nonce, &salsaKey)
return usefulLen, nil
}
return obfs
}
// MakeDeobfs returns a function Deobfser. A Deobfser takes in a single byte slice,
// containing the message to be decrypted, and returns a *Frame containing the frame
// information and plaintext
func MakeDeobfs(salsaKey [32]byte, payloadCipher cipher.AEAD) Deobfser {
// stream header length + minimum data size (i.e. nonce size of salsa20)
const minInputLen = HEADER_LEN + salsa20NonceSize
deobfs := func(in []byte) (*Frame, error) {
if len(in) < minInputLen {
return nil, fmt.Errorf("input size %v, but it cannot be shorter than %v bytes", len(in), minInputLen)
}
header := in[:HEADER_LEN]
pldWithOverHead := in[HEADER_LEN:] // payload + potential overhead
nonce := in[len(in)-salsa20NonceSize:]
salsa20.XORKeyStream(header, header, nonce, &salsaKey)
streamID := u32(header[0:4])
seq := u64(header[4:12])
closing := header[12]
extraLen := header[13]
usefulPayloadLen := len(pldWithOverHead) - int(extraLen)
if usefulPayloadLen < 0 || usefulPayloadLen > len(pldWithOverHead) {
return nil, errors.New("extra length is negative or extra length is greater than total pldWithOverHead length")
}
var outputPayload []byte
if payloadCipher == nil {
if extraLen == 0 {
outputPayload = pldWithOverHead
} else {
outputPayload = pldWithOverHead[:usefulPayloadLen]
}
} else {
_, err := payloadCipher.Open(pldWithOverHead[:0], header[:payloadCipher.NonceSize()], pldWithOverHead, nil)
if err != nil {
return nil, err
}
outputPayload = pldWithOverHead[:usefulPayloadLen] outputPayload = pldWithOverHead[:usefulPayloadLen]
} }
} else {
_, err := o.payloadCipher.Open(pldWithOverHead[:0], header[:o.payloadCipher.NonceSize()], pldWithOverHead, nil)
if err != nil {
return err
}
outputPayload = pldWithOverHead[:usefulPayloadLen]
}
f.StreamID = streamID ret := &Frame{
f.Seq = seq StreamID: streamID,
f.Closing = closing Seq: seq,
f.Payload = outputPayload Closing: closing,
return nil Payload: outputPayload,
}
return ret, nil
}
return deobfs
} }
func MakeObfuscator(encryptionMethod byte, sessionKey [32]byte) (o Obfuscator, err error) { func MakeObfuscator(encryptionMethod byte, sessionKey [32]byte) (obfuscator Obfuscator, err error) {
o = Obfuscator{ obfuscator = Obfuscator{
sessionKey: sessionKey, SessionKey: sessionKey,
} }
var payloadCipher cipher.AEAD
switch encryptionMethod { switch encryptionMethod {
case EncryptionMethodPlain: case E_METHOD_PLAIN:
o.payloadCipher = nil payloadCipher = nil
case EncryptionMethodAES256GCM: obfuscator.maxOverhead = salsa20NonceSize
case E_METHOD_AES_GCM:
var c cipher.Block var c cipher.Block
c, err = aes.NewCipher(sessionKey[:]) c, err = aes.NewCipher(sessionKey[:])
if err != nil { if err != nil {
return return
} }
o.payloadCipher, err = cipher.NewGCM(c) payloadCipher, err = cipher.NewGCM(c)
if err != nil { if err != nil {
return return
} }
case EncryptionMethodAES128GCM: obfuscator.maxOverhead = payloadCipher.Overhead()
var c cipher.Block case E_METHOD_CHACHA20_POLY1305:
c, err = aes.NewCipher(sessionKey[:16]) payloadCipher, err = chacha20poly1305.New(sessionKey[:])
if err != nil {
return
}
o.payloadCipher, err = cipher.NewGCM(c)
if err != nil {
return
}
case EncryptionMethodChaha20Poly1305:
o.payloadCipher, err = chacha20poly1305.New(sessionKey[:])
if err != nil { if err != nil {
return return
} }
obfuscator.maxOverhead = payloadCipher.Overhead()
default: default:
return o, fmt.Errorf("unknown encryption method valued %v", encryptionMethod) return obfuscator, errors.New("Unknown encryption method")
} }
if o.payloadCipher != nil { if payloadCipher != nil {
if o.payloadCipher.NonceSize() > frameHeaderLength { if payloadCipher.NonceSize() > HEADER_LEN {
return o, errors.New("payload AEAD's nonce size cannot be greater than size of frame header") return obfuscator, errors.New("payload AEAD's nonce size cannot be greater than size of frame header")
} }
} }
obfuscator.Obfs = MakeObfs(sessionKey, payloadCipher)
obfuscator.Deobfs = MakeDeobfs(sessionKey, payloadCipher)
return return
} }

View File

@ -1,128 +1,73 @@
package multiplex package multiplex
import ( import (
"bytes"
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"golang.org/x/crypto/chacha20poly1305"
"math/rand" "math/rand"
"reflect" "reflect"
"testing" "testing"
"testing/quick" "testing/quick"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/chacha20poly1305"
) )
func TestGenerateObfs(t *testing.T) { func TestGenerateObfs(t *testing.T) {
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
run := func(o Obfuscator, t *testing.T) { run := func(obfuscator Obfuscator, ct *testing.T) {
obfsBuf := make([]byte, 512) obfsBuf := make([]byte, 512)
_testFrame, _ := quick.Value(reflect.TypeOf(Frame{}), rand.New(rand.NewSource(42))) f := &Frame{}
testFrame := _testFrame.Interface().(Frame) _testFrame, _ := quick.Value(reflect.TypeOf(f), rand.New(rand.NewSource(42)))
i, err := o.obfuscate(&testFrame, obfsBuf, 0) testFrame := _testFrame.Interface().(*Frame)
assert.NoError(t, err) i, err := obfuscator.Obfs(testFrame, obfsBuf, 0)
var resultFrame Frame if err != nil {
ct.Error("failed to obfs ", err)
return
}
err = o.deobfuscate(&resultFrame, obfsBuf[:i]) resultFrame, err := obfuscator.Deobfs(obfsBuf[:i])
assert.NoError(t, err) if err != nil {
assert.EqualValues(t, testFrame, resultFrame) ct.Error("failed to deobfs ", err)
return
}
if !bytes.Equal(testFrame.Payload, resultFrame.Payload) || testFrame.StreamID != resultFrame.StreamID {
ct.Error("expecting", testFrame,
"got", resultFrame)
return
}
} }
t.Run("plain", func(t *testing.T) { t.Run("plain", func(t *testing.T) {
o, err := MakeObfuscator(EncryptionMethodPlain, sessionKey) obfuscator, err := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
assert.NoError(t, err) if err != nil {
run(o, t) t.Errorf("failed to generate obfuscator %v", err)
} else {
run(obfuscator, t)
}
}) })
t.Run("aes-256-gcm", func(t *testing.T) { t.Run("aes-gcm", func(t *testing.T) {
o, err := MakeObfuscator(EncryptionMethodAES256GCM, sessionKey) obfuscator, err := MakeObfuscator(E_METHOD_AES_GCM, sessionKey)
assert.NoError(t, err) if err != nil {
run(o, t) t.Errorf("failed to generate obfuscator %v", err)
}) } else {
t.Run("aes-128-gcm", func(t *testing.T) { run(obfuscator, t)
o, err := MakeObfuscator(EncryptionMethodAES128GCM, sessionKey) }
assert.NoError(t, err)
run(o, t)
}) })
t.Run("chacha20-poly1305", func(t *testing.T) { t.Run("chacha20-poly1305", func(t *testing.T) {
o, err := MakeObfuscator(EncryptionMethodChaha20Poly1305, sessionKey) obfuscator, err := MakeObfuscator(E_METHOD_CHACHA20_POLY1305, sessionKey)
assert.NoError(t, err) if err != nil {
run(o, t) t.Errorf("failed to generate obfuscator %v", err)
} else {
run(obfuscator, t)
}
}) })
t.Run("unknown encryption method", func(t *testing.T) { t.Run("unknown encryption method", func(t *testing.T) {
_, err := MakeObfuscator(0xff, sessionKey) _, err := MakeObfuscator(0xff, sessionKey)
assert.Error(t, err) if err == nil {
}) t.Errorf("unknown encryption mehtod error expected")
}
func TestObfuscate(t *testing.T) {
var sessionKey [32]byte
rand.Read(sessionKey[:])
const testPayloadLen = 1024
testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload)
f := Frame{
StreamID: 0,
Seq: 0,
Closing: 0,
Payload: testPayload,
}
runTest := func(t *testing.T, o Obfuscator) {
obfsBuf := make([]byte, testPayloadLen*2)
n, err := o.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
resultFrame := Frame{}
err = o.deobfuscate(&resultFrame, obfsBuf[:n])
assert.NoError(t, err)
assert.EqualValues(t, f, resultFrame)
}
t.Run("plain", func(t *testing.T) {
o := Obfuscator{
payloadCipher: nil,
sessionKey: sessionKey,
} }
runTest(t, o)
}) })
t.Run("aes-128-gcm", func(t *testing.T) {
c, err := aes.NewCipher(sessionKey[:16])
assert.NoError(t, err)
payloadCipher, err := cipher.NewGCM(c)
assert.NoError(t, err)
o := Obfuscator{
payloadCipher: payloadCipher,
sessionKey: sessionKey,
}
runTest(t, o)
})
t.Run("aes-256-gcm", func(t *testing.T) {
c, err := aes.NewCipher(sessionKey[:])
assert.NoError(t, err)
payloadCipher, err := cipher.NewGCM(c)
assert.NoError(t, err)
o := Obfuscator{
payloadCipher: payloadCipher,
sessionKey: sessionKey,
}
runTest(t, o)
})
t.Run("chacha20-poly1305", func(t *testing.T) {
payloadCipher, err := chacha20poly1305.New(sessionKey[:])
assert.NoError(t, err)
o := Obfuscator{
payloadCipher: payloadCipher,
sessionKey: sessionKey,
}
runTest(t, o)
})
} }
func BenchmarkObfs(b *testing.B) { func BenchmarkObfs(b *testing.B) {
@ -135,7 +80,7 @@ func BenchmarkObfs(b *testing.B) {
testPayload, testPayload,
} }
obfsBuf := make([]byte, len(testPayload)*2) obfsBuf := make([]byte, defaultSendRecvBufSize)
var key [32]byte var key [32]byte
rand.Read(key[:]) rand.Read(key[:])
@ -143,53 +88,40 @@ func BenchmarkObfs(b *testing.B) {
c, _ := aes.NewCipher(key[:]) c, _ := aes.NewCipher(key[:])
payloadCipher, _ := cipher.NewGCM(c) payloadCipher, _ := cipher.NewGCM(c)
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher,
sessionKey: key,
}
b.SetBytes(int64(len(testFrame.Payload))) b.SetBytes(int64(len(testFrame.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs(testFrame, obfsBuf, 0)
} }
}) })
b.Run("AES128GCM", func(b *testing.B) { b.Run("AES128GCM", func(b *testing.B) {
c, _ := aes.NewCipher(key[:16]) c, _ := aes.NewCipher(key[:16])
payloadCipher, _ := cipher.NewGCM(c) payloadCipher, _ := cipher.NewGCM(c)
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher,
sessionKey: key,
}
b.SetBytes(int64(len(testFrame.Payload))) b.SetBytes(int64(len(testFrame.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs(testFrame, obfsBuf, 0)
} }
}) })
b.Run("plain", func(b *testing.B) { b.Run("plain", func(b *testing.B) {
obfuscator := Obfuscator{ obfs := MakeObfs(key, nil)
payloadCipher: nil,
sessionKey: key,
}
b.SetBytes(int64(len(testFrame.Payload))) b.SetBytes(int64(len(testFrame.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs(testFrame, obfsBuf, 0)
} }
}) })
b.Run("chacha20Poly1305", func(b *testing.B) { b.Run("chacha20Poly1305", func(b *testing.B) {
payloadCipher, _ := chacha20poly1305.New(key[:]) payloadCipher, _ := chacha20poly1305.New(key[:16])
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher,
sessionKey: key,
}
b.SetBytes(int64(len(testFrame.Payload))) b.SetBytes(int64(len(testFrame.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs(testFrame, obfsBuf, 0)
} }
}) })
} }
@ -204,73 +136,60 @@ func BenchmarkDeobfs(b *testing.B) {
testPayload, testPayload,
} }
obfsBuf := make([]byte, len(testPayload)*2) obfsBuf := make([]byte, defaultSendRecvBufSize)
var key [32]byte var key [32]byte
rand.Read(key[:]) rand.Read(key[:])
b.Run("AES256GCM", func(b *testing.B) { b.Run("AES256GCM", func(b *testing.B) {
c, _ := aes.NewCipher(key[:]) c, _ := aes.NewCipher(key[:])
payloadCipher, _ := cipher.NewGCM(c) payloadCipher, _ := cipher.NewGCM(c)
obfuscator := Obfuscator{
payloadCipher: payloadCipher,
sessionKey: key,
}
n, _ := obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs := MakeObfs(key, payloadCipher)
n, _ := obfs(testFrame, obfsBuf, 0)
deobfs := MakeDeobfs(key, payloadCipher)
frame := new(Frame)
b.SetBytes(int64(n)) b.SetBytes(int64(n))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.deobfuscate(frame, obfsBuf[:n]) deobfs(obfsBuf[:n])
} }
}) })
b.Run("AES128GCM", func(b *testing.B) { b.Run("AES128GCM", func(b *testing.B) {
c, _ := aes.NewCipher(key[:16]) c, _ := aes.NewCipher(key[:16])
payloadCipher, _ := cipher.NewGCM(c) payloadCipher, _ := cipher.NewGCM(c)
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher, n, _ := obfs(testFrame, obfsBuf, 0)
sessionKey: key, deobfs := MakeDeobfs(key, payloadCipher)
}
n, _ := obfuscator.obfuscate(testFrame, obfsBuf, 0)
frame := new(Frame)
b.ResetTimer() b.ResetTimer()
b.SetBytes(int64(n)) b.SetBytes(int64(n))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.deobfuscate(frame, obfsBuf[:n]) deobfs(obfsBuf[:n])
} }
}) })
b.Run("plain", func(b *testing.B) { b.Run("plain", func(b *testing.B) {
obfuscator := Obfuscator{ obfs := MakeObfs(key, nil)
payloadCipher: nil, n, _ := obfs(testFrame, obfsBuf, 0)
sessionKey: key, deobfs := MakeDeobfs(key, nil)
}
n, _ := obfuscator.obfuscate(testFrame, obfsBuf, 0)
frame := new(Frame)
b.ResetTimer() b.ResetTimer()
b.SetBytes(int64(n)) b.SetBytes(int64(n))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.deobfuscate(frame, obfsBuf[:n]) deobfs(obfsBuf[:n])
} }
}) })
b.Run("chacha20Poly1305", func(b *testing.B) { b.Run("chacha20Poly1305", func(b *testing.B) {
payloadCipher, _ := chacha20poly1305.New(key[:]) payloadCipher, _ := chacha20poly1305.New(key[:16])
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher, n, _ := obfs(testFrame, obfsBuf, 0)
sessionKey: key, deobfs := MakeDeobfs(key, payloadCipher)
}
n, _ := obfuscator.obfuscate(testFrame, obfsBuf, 0)
frame := new(Frame)
b.ResetTimer() b.ResetTimer()
b.SetBytes(int64(n)) b.SetBytes(int64(n))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.deobfuscate(frame, obfsBuf[:n]) deobfs(obfsBuf[:n])
} }
}) })
} }

View File

@ -14,11 +14,15 @@ type recvBuffer interface {
// Instead, it should behave as if it hasn't been closed. Closure is only relevant // Instead, it should behave as if it hasn't been closed. Closure is only relevant
// when the buffer is empty. // when the buffer is empty.
io.ReadCloser io.ReadCloser
Write(*Frame) (toBeClosed bool, err error) io.WriterTo
Write(Frame) (toBeClosed bool, err error)
SetReadDeadline(time time.Time) SetReadDeadline(time time.Time)
// SetWriteToTimeout sets the duration a recvBuffer waits in a WriteTo call when nothing
// has been written for a while. After that duration it should return ErrTimeout
SetWriteToTimeout(d time.Duration)
} }
// size we want the amount of unread data in buffer to grow before recvBuffer.Write blocks. // size we want the amount of unread data in buffer to grow before recvBuffer.Write blocks.
// If the buffer grows larger than what the system's memory can offer at the time of recvBuffer.Write, // If the buffer grows larger than what the system's memory can offer at the time of recvBuffer.Write,
// a panic will happen. // a panic will happen.
const recvBufferSizeLimit = 1<<31 - 1 const recvBufferSizeLimit = defaultSendRecvBufSize << 12

View File

@ -3,20 +3,20 @@ package multiplex
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"net" "net"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
const ( const (
acceptBacklog = 1024 acceptBacklog = 1024
// TODO: will this be a signature?
defaultSendRecvBufSize = 20480
defaultInactivityTimeout = 30 * time.Second defaultInactivityTimeout = 30 * time.Second
defaultMaxOnWireSize = 1<<14 + 256 // https://tools.ietf.org/html/rfc8446#section-5.2
) )
var ErrBrokenSession = errors.New("broken session") var ErrBrokenSession = errors.New("broken session")
@ -24,27 +24,30 @@ var errRepeatSessionClosing = errors.New("trying to close a closed session")
var errRepeatStreamClosing = errors.New("trying to close a closed stream") var errRepeatStreamClosing = errors.New("trying to close a closed stream")
var errNoMultiplex = errors.New("a singleplexing session can have only one stream") var errNoMultiplex = errors.New("a singleplexing session can have only one stream")
type switchboardStrategy int
type SessionConfig struct { type SessionConfig struct {
Obfuscator Obfuscator
// Valve is used to limit transmission rates, and record and limit usage
Valve Valve
Unordered bool Unordered bool
// A Singleplexing session always has just one stream
Singleplex bool Singleplex bool
// maximum size of an obfuscated frame, including headers and overhead // maximum size of an obfuscated frame, including headers and overhead
MsgOnWireSizeLimit int MsgOnWireSizeLimit int
// StreamSendBufferSize sets the buffer size used to send data from a Stream (Stream.obfsBuf)
StreamSendBufferSize int
// ConnReceiveBufferSize sets the buffer size used to receive data from an underlying Conn (allocated in
// switchboard.deplex)
ConnReceiveBufferSize int
// InactivityTimeout sets the duration a Session waits while it has no active streams before it closes itself // InactivityTimeout sets the duration a Session waits while it has no active streams before it closes itself
InactivityTimeout time.Duration InactivityTimeout time.Duration
} }
// A Session represents a self-contained communication chain between local and remote. It manages its streams,
// controls serialisation and encryption of data sent and received using the supplied Obfuscator, and send and receive
// data through a manged connection pool filled with underlying connections added to it.
type Session struct { type Session struct {
id uint32 id uint32
@ -55,16 +58,7 @@ type Session struct {
// atomic // atomic
activeStreamCount uint32 activeStreamCount uint32
streams sync.Map
streamsM sync.Mutex
streams map[uint32]*Stream
// For accepting new streams
acceptCh chan *Stream
// a pool of heap allocated frame objects so we don't have to allocate a new one each time we receive a frame
recvFramePool sync.Pool
streamObfsBufPool sync.Pool
// Switchboard manages all connections to remote // Switchboard manages all connections to remote
sb *switchboard sb *switchboard
@ -72,19 +66,16 @@ type Session struct {
// Used for LocalAddr() and RemoteAddr() etc. // Used for LocalAddr() and RemoteAddr() etc.
addrs atomic.Value addrs atomic.Value
// For accepting new streams
acceptCh chan *Stream
closed uint32 closed uint32
terminalMsgSetter sync.Once terminalMsg atomic.Value
terminalMsg string
// the max size passed to Write calls before it splits it into multiple frames // the max size passed to Write calls before it splits it into multiple frames
// i.e. the max size a piece of data can fit into a Frame.Payload // i.e. the max size a piece of data can fit into a Frame.Payload
maxStreamUnitWrite int maxStreamUnitWrite int
// streamSendBufferSize sets the buffer size used to send data from a Stream (Stream.obfsBuf)
streamSendBufferSize int
// connReceiveBufferSize sets the buffer size used to receive data from an underlying Conn (allocated in
// switchboard.deplex)
connReceiveBufferSize int
} }
func MakeSession(id uint32, config SessionConfig) *Session { func MakeSession(id uint32, config SessionConfig) *Session {
@ -93,39 +84,32 @@ func MakeSession(id uint32, config SessionConfig) *Session {
SessionConfig: config, SessionConfig: config,
nextStreamID: 1, nextStreamID: 1,
acceptCh: make(chan *Stream, acceptBacklog), acceptCh: make(chan *Stream, acceptBacklog),
recvFramePool: sync.Pool{New: func() interface{} { return &Frame{} }},
streams: map[uint32]*Stream{},
} }
sesh.addrs.Store([]net.Addr{nil, nil}) sesh.addrs.Store([]net.Addr{nil, nil})
if config.Valve == nil { if config.Valve == nil {
sesh.Valve = UNLIMITED_VALVE sesh.Valve = UNLIMITED_VALVE
} }
if config.StreamSendBufferSize <= 0 {
sesh.StreamSendBufferSize = defaultSendRecvBufSize
}
if config.ConnReceiveBufferSize <= 0 {
sesh.ConnReceiveBufferSize = defaultSendRecvBufSize
}
if config.MsgOnWireSizeLimit <= 0 { if config.MsgOnWireSizeLimit <= 0 {
sesh.MsgOnWireSizeLimit = defaultMaxOnWireSize sesh.MsgOnWireSizeLimit = defaultSendRecvBufSize - 1024
} }
if config.InactivityTimeout == 0 { if config.InactivityTimeout == 0 {
sesh.InactivityTimeout = defaultInactivityTimeout sesh.InactivityTimeout = defaultInactivityTimeout
} }
// todo: validation. this must be smaller than StreamSendBufferSize
sesh.maxStreamUnitWrite = sesh.MsgOnWireSizeLimit - frameHeaderLength - maxExtraLen sesh.maxStreamUnitWrite = sesh.MsgOnWireSizeLimit - HEADER_LEN - sesh.Obfuscator.maxOverhead
sesh.streamSendBufferSize = sesh.MsgOnWireSizeLimit
sesh.connReceiveBufferSize = 20480 // for backwards compatibility
sesh.streamObfsBufPool = sync.Pool{New: func() interface{} {
b := make([]byte, sesh.streamSendBufferSize)
return &b
}}
sesh.sb = makeSwitchboard(sesh) sesh.sb = makeSwitchboard(sesh)
time.AfterFunc(sesh.InactivityTimeout, sesh.checkTimeout) go sesh.timeoutAfter(sesh.InactivityTimeout)
return sesh return sesh
} }
func (sesh *Session) GetSessionKey() [32]byte {
return sesh.sessionKey
}
func (sesh *Session) streamCountIncr() uint32 { func (sesh *Session) streamCountIncr() uint32 {
return atomic.AddUint32(&sesh.activeStreamCount, 1) return atomic.AddUint32(&sesh.activeStreamCount, 1)
} }
@ -136,14 +120,12 @@ func (sesh *Session) streamCount() uint32 {
return atomic.LoadUint32(&sesh.activeStreamCount) return atomic.LoadUint32(&sesh.activeStreamCount)
} }
// AddConnection is used to add an underlying connection to the connection pool
func (sesh *Session) AddConnection(conn net.Conn) { func (sesh *Session) AddConnection(conn net.Conn) {
sesh.sb.addConn(conn) sesh.sb.addConn(conn)
addrs := []net.Addr{conn.LocalAddr(), conn.RemoteAddr()} addrs := []net.Addr{conn.LocalAddr(), conn.RemoteAddr()}
sesh.addrs.Store(addrs) sesh.addrs.Store(addrs)
} }
// OpenStream is similar to net.Dial. It opens up a new stream
func (sesh *Session) OpenStream() (*Stream, error) { func (sesh *Session) OpenStream() (*Stream, error) {
if sesh.IsClosed() { if sesh.IsClosed() {
return nil, ErrBrokenSession return nil, ErrBrokenSession
@ -156,15 +138,12 @@ func (sesh *Session) OpenStream() (*Stream, error) {
return nil, errNoMultiplex return nil, errNoMultiplex
} }
stream := makeStream(sesh, id) stream := makeStream(sesh, id)
sesh.streamsM.Lock() sesh.streams.Store(id, stream)
sesh.streams[id] = stream
sesh.streamsM.Unlock()
sesh.streamCountIncr() sesh.streamCountIncr()
log.Tracef("stream %v of session %v opened", id, sesh.id) log.Tracef("stream %v of session %v opened", id, sesh.id)
return stream, nil return stream, nil
} }
// Accept is similar to net.Listener's Accept(). It blocks and returns an incoming stream
func (sesh *Session) Accept() (net.Conn, error) { func (sesh *Session) Accept() (net.Conn, error) {
if sesh.IsClosed() { if sesh.IsClosed() {
return nil, ErrBrokenSession return nil, ErrBrokenSession
@ -178,46 +157,44 @@ func (sesh *Session) Accept() (net.Conn, error) {
} }
func (sesh *Session) closeStream(s *Stream, active bool) error { func (sesh *Session) closeStream(s *Stream, active bool) error {
if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) { if atomic.SwapUint32(&s.closed, 1) == 1 {
return fmt.Errorf("closing stream %v: %w", s.id, errRepeatStreamClosing) return fmt.Errorf("closing stream %v: %w", s.id, errRepeatStreamClosing)
} }
_ = s.recvBuf.Close() // recvBuf.Close should not return error _ = s.recvBuf.Close() // recvBuf.Close should not return error
if active { if active {
tmpBuf := sesh.streamObfsBufPool.Get().(*[]byte)
// Notify remote that this stream is closed // Notify remote that this stream is closed
common.CryptoRandRead((*tmpBuf)[:1]) padding := genRandomPadding()
padLen := int((*tmpBuf)[0]) + 1 f := &Frame{
payload := (*tmpBuf)[frameHeaderLength : padLen+frameHeaderLength] StreamID: s.id,
common.CryptoRandRead(payload) Seq: s.nextSendSeq,
Closing: C_STREAM,
Payload: padding,
}
s.nextSendSeq++
// must be holding s.wirtingM on entry obfsBuf := make([]byte, len(padding)+HEADER_LEN+sesh.Obfuscator.maxOverhead)
s.writingFrame.Closing = closingStream i, err := sesh.Obfs(f, obfsBuf, 0)
s.writingFrame.Payload = payload
err := s.obfuscateAndSend(*tmpBuf, frameHeaderLength)
sesh.streamObfsBufPool.Put(tmpBuf)
if err != nil { if err != nil {
return err return err
} }
log.Tracef("stream %v actively closed.", s.id) _, err = sesh.sb.send(obfsBuf[:i], &s.assignedConnId)
if err != nil {
return err
}
log.Tracef("stream %v actively closed. seq %v", s.id, f.Seq)
} else { } else {
log.Tracef("stream %v passively closed", s.id) log.Tracef("stream %v passively closed", s.id)
} }
// We set it as nil to signify that the stream id had existed before. // id may or may not exist as this is user input, if we use Delete(s.id) here it will panic
// If we Delete(s.id) straight away, later on in recvDataFromRemote, it will not be able to tell sesh.streams.Store(s.id, nil)
// if the frame it received was from a new stream or a dying stream whose frame arrived late
sesh.streamsM.Lock()
sesh.streams[s.id] = nil
sesh.streamsM.Unlock()
if sesh.streamCountDecr() == 0 { if sesh.streamCountDecr() == 0 {
if sesh.Singleplex { if sesh.Singleplex {
return sesh.Close() return sesh.Close()
} else { } else {
log.Debugf("session %v has no active stream left", sesh.id) log.Debugf("session %v has no active stream left", sesh.id)
time.AfterFunc(sesh.InactivityTimeout, sesh.checkTimeout) go sesh.timeoutAfter(sesh.InactivityTimeout)
} }
} }
return nil return nil
@ -227,112 +204,117 @@ func (sesh *Session) closeStream(s *Stream, active bool) error {
// to the stream buffer, otherwise it fetches the desired stream instance, or creates and stores one if it's a new // to the stream buffer, otherwise it fetches the desired stream instance, or creates and stores one if it's a new
// stream and then writes to the stream buffer // stream and then writes to the stream buffer
func (sesh *Session) recvDataFromRemote(data []byte) error { func (sesh *Session) recvDataFromRemote(data []byte) error {
frame := sesh.recvFramePool.Get().(*Frame) frame, err := sesh.Deobfs(data)
defer sesh.recvFramePool.Put(frame)
err := sesh.deobfuscate(frame, data)
if err != nil { if err != nil {
return fmt.Errorf("Failed to decrypt a frame for session %v: %v", sesh.id, err) return fmt.Errorf("Failed to decrypt a frame for session %v: %v", sesh.id, err)
} }
if frame.Closing == closingSession { if frame.Closing == C_SESSION {
sesh.SetTerminalMsg("Received a closing notification frame") sesh.SetTerminalMsg("Received a closing notification frame")
return sesh.passiveClose() return sesh.passiveClose()
} }
sesh.streamsM.Lock() newStream := makeStream(sesh, frame.StreamID)
if sesh.IsClosed() { existingStreamI, existing := sesh.streams.LoadOrStore(frame.StreamID, newStream)
sesh.streamsM.Unlock()
return ErrBrokenSession
}
existingStream, existing := sesh.streams[frame.StreamID]
if existing { if existing {
sesh.streamsM.Unlock() if existingStreamI == nil {
if existingStream == nil {
// this is when the stream existed before but has since been closed. We do nothing // this is when the stream existed before but has since been closed. We do nothing
return nil return nil
} }
return existingStream.recvFrame(frame) return existingStreamI.(*Stream).recvFrame(*frame)
} else { } else {
newStream := makeStream(sesh, frame.StreamID)
sesh.streams[frame.StreamID] = newStream
sesh.acceptCh <- newStream
sesh.streamsM.Unlock()
// new stream // new stream
sesh.streamCountIncr() sesh.streamCountIncr()
return newStream.recvFrame(frame) sesh.acceptCh <- newStream
return newStream.recvFrame(*frame)
} }
} }
func (sesh *Session) SetTerminalMsg(msg string) { func (sesh *Session) SetTerminalMsg(msg string) {
log.Debug("terminal message set to " + msg) sesh.terminalMsg.Store(msg)
sesh.terminalMsgSetter.Do(func() {
sesh.terminalMsg = msg
})
} }
func (sesh *Session) TerminalMsg() string { func (sesh *Session) TerminalMsg() string {
return sesh.terminalMsg msg := sesh.terminalMsg.Load()
} if msg != nil {
return msg.(string)
func (sesh *Session) closeSession() error { } else {
if !atomic.CompareAndSwapUint32(&sesh.closed, 0, 1) { return ""
log.Debugf("session %v has already been closed", sesh.id)
return errRepeatSessionClosing
} }
sesh.streamsM.Lock()
close(sesh.acceptCh)
for id, stream := range sesh.streams {
if stream != nil && atomic.CompareAndSwapUint32(&stream.closed, 0, 1) {
_ = stream.recvBuf.Close() // will not block
delete(sesh.streams, id)
sesh.streamCountDecr()
}
}
sesh.streamsM.Unlock()
return nil
} }
func (sesh *Session) passiveClose() error { func (sesh *Session) passiveClose() error {
log.Debugf("attempting to passively close session %v", sesh.id) log.Debugf("attempting to passively close session %v", sesh.id)
err := sesh.closeSession() if atomic.SwapUint32(&sesh.closed, 1) == 1 {
if err != nil { log.Debugf("session %v has already been closed", sesh.id)
return err return errRepeatSessionClosing
} }
sesh.acceptCh <- nil
sesh.streams.Range(func(key, streamI interface{}) bool {
if streamI == nil {
return true
}
stream := streamI.(*Stream)
atomic.StoreUint32(&stream.closed, 1)
_ = stream.recvBuf.Close() // will not block
sesh.streams.Delete(key)
sesh.streamCountDecr()
return true
})
sesh.sb.closeAll() sesh.sb.closeAll()
log.Debugf("session %v closed gracefully", sesh.id) log.Debugf("session %v closed gracefully", sesh.id)
return nil return nil
} }
func genRandomPadding() []byte {
lenB := make([]byte, 1)
common.CryptoRandRead(lenB)
pad := make([]byte, lenB[0]+1)
common.CryptoRandRead(pad)
return pad
}
func (sesh *Session) Close() error { func (sesh *Session) Close() error {
log.Debugf("attempting to actively close session %v", sesh.id) log.Debugf("attempting to actively close session %v", sesh.id)
err := sesh.closeSession() if atomic.SwapUint32(&sesh.closed, 1) == 1 {
if err != nil { log.Debugf("session %v has already been closed", sesh.id)
return err return errRepeatSessionClosing
} }
sesh.acceptCh <- nil
// close all streams
sesh.streams.Range(func(key, streamI interface{}) bool {
if streamI == nil {
return true
}
stream := streamI.(*Stream)
atomic.StoreUint32(&stream.closed, 1)
_ = stream.recvBuf.Close() // will not block
sesh.streams.Delete(key)
sesh.streamCountDecr()
return true
})
// we send a notice frame telling remote to close the session // we send a notice frame telling remote to close the session
pad := genRandomPadding()
buf := sesh.streamObfsBufPool.Get().(*[]byte)
common.CryptoRandRead((*buf)[:1])
padLen := int((*buf)[0]) + 1
payload := (*buf)[frameHeaderLength : padLen+frameHeaderLength]
common.CryptoRandRead(payload)
f := &Frame{ f := &Frame{
StreamID: 0xffffffff, StreamID: 0xffffffff,
Seq: 0, Seq: 0,
Closing: closingSession, Closing: C_SESSION,
Payload: payload, Payload: pad,
} }
i, err := sesh.obfuscate(f, *buf, frameHeaderLength) obfsBuf := make([]byte, len(pad)+HEADER_LEN+sesh.Obfuscator.maxOverhead)
i, err := sesh.Obfs(f, obfsBuf, 0)
if err != nil { if err != nil {
return err return err
} }
_, err = sesh.sb.send((*buf)[:i], new(net.Conn)) _, err = sesh.sb.send(obfsBuf[:i], new(uint32))
if err != nil { if err != nil {
return err return err
} }
sesh.sb.closeAll() sesh.sb.closeAll()
log.Debugf("session %v closed gracefully", sesh.id) log.Debugf("session %v closed gracefully", sesh.id)
return nil return nil
@ -342,7 +324,9 @@ func (sesh *Session) IsClosed() bool {
return atomic.LoadUint32(&sesh.closed) == 1 return atomic.LoadUint32(&sesh.closed) == 1
} }
func (sesh *Session) checkTimeout() { func (sesh *Session) timeoutAfter(to time.Duration) {
time.Sleep(to)
if sesh.streamCount() == 0 && !sesh.IsClosed() { if sesh.streamCount() == 0 && !sesh.IsClosed() {
sesh.SetTerminalMsg("timeout") sesh.SetTerminalMsg("timeout")
sesh.Close() sesh.Close()

View File

@ -1,10 +1,9 @@
//go:build gofuzz
// +build gofuzz // +build gofuzz
package multiplex package multiplex
func setupSesh_fuzz(unordered bool) *Session { func setupSesh_fuzz(unordered bool) *Session {
obfuscator, _ := MakeObfuscator(EncryptionMethodPlain, [32]byte{}) obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, [32]byte{})
seshConfig := SessionConfig{ seshConfig := SessionConfig{
Obfuscator: obfuscator, Obfuscator: obfuscator,

View File

@ -2,237 +2,171 @@ package multiplex
import ( import (
"bytes" "bytes"
"io" "github.com/cbeuw/connutil"
"io/ioutil"
"math/rand" "math/rand"
"net"
"strconv" "strconv"
"sync" "sync"
"sync/atomic" "sync/atomic"
"testing" "testing"
"time" "time"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
) )
var seshConfigs = map[string]SessionConfig{ var seshConfigOrdered = SessionConfig{}
"ordered": {},
"unordered": {Unordered: true},
}
var encryptionMethods = map[string]byte{
"plain": EncryptionMethodPlain,
"aes-256-gcm": EncryptionMethodAES256GCM,
"aes-128-gcm": EncryptionMethodAES128GCM,
"chacha20poly1305": EncryptionMethodChaha20Poly1305,
}
const testPayloadLen = 1024 var seshConfigUnordered = SessionConfig{
const obfsBufLen = testPayloadLen * 2 Unordered: true,
}
func TestRecvDataFromRemote(t *testing.T) { func TestRecvDataFromRemote(t *testing.T) {
testPayloadLen := 1024
testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload)
f := &Frame{
1,
0,
0,
testPayload,
}
obfsBuf := make([]byte, 17000)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
t.Run("plain ordered", func(t *testing.T) {
obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfigOrdered)
n, _ := sesh.Obfs(f, obfsBuf, 0)
for seshType, seshConfig := range seshConfigs { err := sesh.recvDataFromRemote(obfsBuf[:n])
seshConfig := seshConfig if err != nil {
t.Run(seshType, func(t *testing.T) { t.Error(err)
var err error return
seshConfig.Obfuscator, err = MakeObfuscator(EncryptionMethodPlain, sessionKey) }
if err != nil { stream, err := sesh.Accept()
t.Fatalf("failed to make obfuscator: %v", err) if err != nil {
} t.Error(err)
t.Run("initial frame", func(t *testing.T) { return
sesh := MakeSession(0, seshConfig) }
obfsBuf := make([]byte, obfsBufLen)
f := Frame{
1,
0,
0,
make([]byte, testPayloadLen),
}
rand.Read(f.Payload)
n, err := sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
stream, err := sesh.Accept()
assert.NoError(t, err)
resultPayload := make([]byte, testPayloadLen) resultPayload := make([]byte, testPayloadLen)
_, err = stream.Read(resultPayload) _, err = stream.Read(resultPayload)
assert.NoError(t, err) if err != nil {
t.Error(err)
return
}
if !bytes.Equal(testPayload, resultPayload) {
t.Errorf("Expecting %x, got %x", testPayload, resultPayload)
}
})
t.Run("aes-gcm ordered", func(t *testing.T) {
obfuscator, _ := MakeObfuscator(E_METHOD_AES_GCM, sessionKey)
seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfigOrdered)
n, _ := sesh.Obfs(f, obfsBuf, 0)
assert.EqualValues(t, f.Payload, resultPayload) err := sesh.recvDataFromRemote(obfsBuf[:n])
}) if err != nil {
t.Error(err)
return
}
stream, err := sesh.Accept()
if err != nil {
t.Error(err)
return
}
t.Run("two frames in order", func(t *testing.T) { resultPayload := make([]byte, testPayloadLen)
sesh := MakeSession(0, seshConfig) _, err = stream.Read(resultPayload)
obfsBuf := make([]byte, obfsBufLen) if err != nil {
f := Frame{ t.Error(err)
1, return
0, }
0, if !bytes.Equal(testPayload, resultPayload) {
make([]byte, testPayloadLen), t.Errorf("Expecting %x, got %x", testPayload, resultPayload)
} }
rand.Read(f.Payload) })
n, err := sesh.obfuscate(&f, obfsBuf, 0) t.Run("chacha20-poly1305 ordered", func(t *testing.T) {
assert.NoError(t, err) obfuscator, _ := MakeObfuscator(E_METHOD_CHACHA20_POLY1305, sessionKey)
err = sesh.recvDataFromRemote(obfsBuf[:n]) seshConfigOrdered.Obfuscator = obfuscator
assert.NoError(t, err) sesh := MakeSession(0, seshConfigOrdered)
stream, err := sesh.Accept() n, _ := sesh.Obfs(f, obfsBuf, 0)
assert.NoError(t, err)
resultPayload := make([]byte, testPayloadLen) err := sesh.recvDataFromRemote(obfsBuf[:n])
_, err = io.ReadFull(stream, resultPayload) if err != nil {
assert.NoError(t, err) t.Error(err)
return
}
stream, err := sesh.Accept()
if err != nil {
t.Error(err)
return
}
assert.EqualValues(t, f.Payload, resultPayload) resultPayload := make([]byte, testPayloadLen)
_, err = stream.Read(resultPayload)
if err != nil {
t.Error(err)
return
}
if !bytes.Equal(testPayload, resultPayload) {
t.Errorf("Expecting %x, got %x", testPayload, resultPayload)
}
})
f.Seq += 1 t.Run("plain unordered", func(t *testing.T) {
rand.Read(f.Payload) obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
n, err = sesh.obfuscate(&f, obfsBuf, 0) seshConfigUnordered.Obfuscator = obfuscator
assert.NoError(t, err) sesh := MakeSession(0, seshConfigOrdered)
err = sesh.recvDataFromRemote(obfsBuf[:n]) n, _ := sesh.Obfs(f, obfsBuf, 0)
assert.NoError(t, err)
_, err = io.ReadFull(stream, resultPayload) err := sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err) if err != nil {
t.Error(err)
return
}
stream, err := sesh.Accept()
if err != nil {
t.Error(err)
return
}
assert.EqualValues(t, f.Payload, resultPayload) resultPayload := make([]byte, testPayloadLen)
}) _, err = stream.Read(resultPayload)
if err != nil {
t.Run("two frames in order", func(t *testing.T) { t.Error(err)
sesh := MakeSession(0, seshConfig) return
obfsBuf := make([]byte, obfsBufLen) }
f := Frame{ if !bytes.Equal(testPayload, resultPayload) {
1, t.Errorf("Expecting %x, got %x", testPayload, resultPayload)
0, }
0, })
make([]byte, testPayloadLen),
}
rand.Read(f.Payload)
n, err := sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
stream, err := sesh.Accept()
assert.NoError(t, err)
resultPayload := make([]byte, testPayloadLen)
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, f.Payload, resultPayload)
f.Seq += 1
rand.Read(f.Payload)
n, err = sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, f.Payload, resultPayload)
})
if seshType == "ordered" {
t.Run("frames out of order", func(t *testing.T) {
sesh := MakeSession(0, seshConfig)
obfsBuf := make([]byte, obfsBufLen)
f := Frame{
1,
0,
0,
nil,
}
// First frame
seq0 := make([]byte, testPayloadLen)
rand.Read(seq0)
f.Seq = 0
f.Payload = seq0
n, err := sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
// Third frame
seq2 := make([]byte, testPayloadLen)
rand.Read(seq2)
f.Seq = 2
f.Payload = seq2
n, err = sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
// Second frame
seq1 := make([]byte, testPayloadLen)
rand.Read(seq1)
f.Seq = 1
f.Payload = seq1
n, err = sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
// Expect things to receive in order
stream, err := sesh.Accept()
assert.NoError(t, err)
resultPayload := make([]byte, testPayloadLen)
// First
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, seq0, resultPayload)
// Second
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, seq1, resultPayload)
// Third
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, seq2, resultPayload)
})
}
})
}
} }
func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) { func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
testPayloadLen := 1024
testPayload := make([]byte, testPayloadLen) testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload) rand.Read(testPayload)
obfsBuf := make([]byte, obfsBufLen) obfsBuf := make([]byte, 17000)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfig := seshConfigs["ordered"] seshConfigOrdered.Obfuscator = obfuscator
seshConfig.Obfuscator, _ = MakeObfuscator(EncryptionMethodPlain, sessionKey) sesh := MakeSession(0, seshConfigOrdered)
sesh := MakeSession(0, seshConfig)
f1 := &Frame{ f1 := &Frame{
1, 1,
0, 0,
closingNothing, C_NOOP,
testPayload, testPayload,
} }
// create stream 1 // create stream 1
n, _ := sesh.obfuscate(f1, obfsBuf, 0) n, _ := sesh.Obfs(f1, obfsBuf, 0)
err := sesh.recvDataFromRemote(obfsBuf[:n]) err := sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving normal frame for stream 1: %v", err) t.Fatalf("receiving normal frame for stream 1: %v", err)
} }
sesh.streamsM.Lock() _, ok := sesh.streams.Load(f1.StreamID)
_, ok := sesh.streams[f1.StreamID]
sesh.streamsM.Unlock()
if !ok { if !ok {
t.Fatal("failed to fetch stream 1 after receiving it") t.Fatal("failed to fetch stream 1 after receiving it")
} }
@ -244,18 +178,16 @@ func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
f2 := &Frame{ f2 := &Frame{
2, 2,
0, 0,
closingNothing, C_NOOP,
testPayload, testPayload,
} }
n, _ = sesh.obfuscate(f2, obfsBuf, 0) n, _ = sesh.Obfs(f2, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving normal frame for stream 2: %v", err) t.Fatalf("receiving normal frame for stream 2: %v", err)
} }
sesh.streamsM.Lock() s2I, ok := sesh.streams.Load(f2.StreamID)
s2M, ok := sesh.streams[f2.StreamID] if s2I == nil || !ok {
sesh.streamsM.Unlock()
if s2M == nil || !ok {
t.Fatal("failed to fetch stream 2 after receiving it") t.Fatal("failed to fetch stream 2 after receiving it")
} }
if sesh.streamCount() != 2 { if sesh.streamCount() != 2 {
@ -266,18 +198,16 @@ func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
f1CloseStream := &Frame{ f1CloseStream := &Frame{
1, 1,
1, 1,
closingStream, C_STREAM,
testPayload, testPayload,
} }
n, _ = sesh.obfuscate(f1CloseStream, obfsBuf, 0) n, _ = sesh.Obfs(f1CloseStream, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving stream closing frame for stream 1: %v", err) t.Fatalf("receiving stream closing frame for stream 1: %v", err)
} }
sesh.streamsM.Lock() s1I, _ := sesh.streams.Load(f1.StreamID)
s1M, _ := sesh.streams[f1.StreamID] if s1I != nil {
sesh.streamsM.Unlock()
if s1M != nil {
t.Fatal("stream 1 still exist after receiving stream close") t.Fatal("stream 1 still exist after receiving stream close")
} }
s1, _ := sesh.Accept() s1, _ := sesh.Accept()
@ -298,30 +228,27 @@ func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
} }
// close stream 1 again // close stream 1 again
n, _ = sesh.obfuscate(f1CloseStream, obfsBuf, 0) n, _ = sesh.Obfs(f1CloseStream, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving stream closing frame for stream 1 %v", err) t.Fatalf("receiving stream closing frame for stream 1 %v", err)
} }
sesh.streamsM.Lock() s1I, _ = sesh.streams.Load(f1.StreamID)
s1M, _ = sesh.streams[f1.StreamID] if s1I != nil {
sesh.streamsM.Unlock()
if s1M != nil {
t.Error("stream 1 exists after receiving stream close for the second time") t.Error("stream 1 exists after receiving stream close for the second time")
} }
streamCount := sesh.streamCount() if sesh.streamCount() != 1 {
if streamCount != 1 { t.Error("stream count isn't 1 after stream 1 closed twice")
t.Errorf("stream count is %v after stream 1 closed twice, expected 1", streamCount)
} }
// close session // close session
fCloseSession := &Frame{ fCloseSession := &Frame{
StreamID: 0xffffffff, StreamID: 0xffffffff,
Seq: 0, Seq: 0,
Closing: closingSession, Closing: C_SESSION,
Payload: testPayload, Payload: testPayload,
} }
n, _ = sesh.obfuscate(fCloseSession, obfsBuf, 0) n, _ = sesh.Obfs(fCloseSession, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving session closing frame: %v", err) t.Fatalf("receiving session closing frame: %v", err)
@ -345,32 +272,30 @@ func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
func TestRecvDataFromRemote_Closing_OutOfOrder(t *testing.T) { func TestRecvDataFromRemote_Closing_OutOfOrder(t *testing.T) {
// Tests for when the closing frame of a stream is received first before any data frame // Tests for when the closing frame of a stream is received first before any data frame
testPayloadLen := 1024
testPayload := make([]byte, testPayloadLen) testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload) rand.Read(testPayload)
obfsBuf := make([]byte, obfsBufLen) obfsBuf := make([]byte, 17000)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfig := seshConfigs["ordered"] seshConfigOrdered.Obfuscator = obfuscator
seshConfig.Obfuscator, _ = MakeObfuscator(EncryptionMethodPlain, sessionKey) sesh := MakeSession(0, seshConfigOrdered)
sesh := MakeSession(0, seshConfig)
// receive stream 1 closing first // receive stream 1 closing first
f1CloseStream := &Frame{ f1CloseStream := &Frame{
1, 1,
1, 1,
closingStream, C_STREAM,
testPayload, testPayload,
} }
n, _ := sesh.obfuscate(f1CloseStream, obfsBuf, 0) n, _ := sesh.Obfs(f1CloseStream, obfsBuf, 0)
err := sesh.recvDataFromRemote(obfsBuf[:n]) err := sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving out of order stream closing frame for stream 1: %v", err) t.Fatalf("receiving out of order stream closing frame for stream 1: %v", err)
} }
sesh.streamsM.Lock() _, ok := sesh.streams.Load(f1CloseStream.StreamID)
_, ok := sesh.streams[f1CloseStream.StreamID]
sesh.streamsM.Unlock()
if !ok { if !ok {
t.Fatal("stream 1 doesn't exist") t.Fatal("stream 1 doesn't exist")
} }
@ -382,10 +307,10 @@ func TestRecvDataFromRemote_Closing_OutOfOrder(t *testing.T) {
f1 := &Frame{ f1 := &Frame{
1, 1,
0, 0,
closingNothing, C_NOOP,
testPayload, testPayload,
} }
n, _ = sesh.obfuscate(f1, obfsBuf, 0) n, _ = sesh.Obfs(f1, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving normal frame for stream 1: %v", err) t.Fatalf("receiving normal frame for stream 1: %v", err)
@ -409,232 +334,173 @@ func TestRecvDataFromRemote_Closing_OutOfOrder(t *testing.T) {
func TestParallelStreams(t *testing.T) { func TestParallelStreams(t *testing.T) {
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
obfuscator, _ := MakeObfuscator(EncryptionMethodPlain, sessionKey) obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfigOrdered)
for seshType, seshConfig := range seshConfigs { numStreams := acceptBacklog
seshConfig := seshConfig seqs := make([]*uint64, numStreams)
t.Run(seshType, func(t *testing.T) { for i := range seqs {
seshConfig.Obfuscator = obfuscator seqs[i] = new(uint64)
sesh := MakeSession(0, seshConfig) }
randFrame := func() *Frame {
id := rand.Intn(numStreams)
return &Frame{
uint32(id),
atomic.AddUint64(seqs[id], 1) - 1,
uint8(rand.Intn(2)),
[]byte{1, 2, 3, 4},
}
}
numStreams := acceptBacklog numOfTests := 5000
seqs := make([]*uint64, numStreams) tests := make([]struct {
for i := range seqs { name string
seqs[i] = new(uint64) frame *Frame
} }, numOfTests)
randFrame := func() *Frame { for i := range tests {
id := rand.Intn(numStreams) tests[i].name = strconv.Itoa(i)
return &Frame{ tests[i].frame = randFrame()
uint32(id), }
atomic.AddUint64(seqs[id], 1) - 1,
uint8(rand.Intn(2)),
[]byte{1, 2, 3, 4},
}
}
const numOfTests = 5000 var wg sync.WaitGroup
tests := make([]struct { for _, tc := range tests {
name string wg.Add(1)
frame *Frame go func(frame *Frame) {
}, numOfTests) data := make([]byte, 1000)
for i := range tests { n, _ := sesh.Obfs(frame, data, 0)
tests[i].name = strconv.Itoa(i) data = data[0:n]
tests[i].frame = randFrame()
}
var wg sync.WaitGroup err := sesh.recvDataFromRemote(data)
for _, tc := range tests { if err != nil {
wg.Add(1) t.Error(err)
go func(frame *Frame) { }
obfsBuf := make([]byte, obfsBufLen) wg.Done()
n, _ := sesh.obfuscate(frame, obfsBuf, 0) }(tc.frame)
obfsBuf = obfsBuf[0:n] }
err := sesh.recvDataFromRemote(obfsBuf) wg.Wait()
if err != nil { sc := int(sesh.streamCount())
t.Error(err) var count int
} sesh.streams.Range(func(_, s interface{}) bool {
wg.Done() if s != nil {
}(tc.frame) count++
} }
return true
wg.Wait() })
sc := int(sesh.streamCount()) if sc != count {
var count int t.Errorf("broken referential integrety: actual %v, reference count: %v", count, sc)
sesh.streamsM.Lock()
for _, s := range sesh.streams {
if s != nil {
count++
}
}
sesh.streamsM.Unlock()
if sc != count {
t.Errorf("broken referential integrety: actual %v, reference count: %v", count, sc)
}
})
} }
} }
func TestStream_SetReadDeadline(t *testing.T) { func TestStream_SetReadDeadline(t *testing.T) {
for seshType, seshConfig := range seshConfigs { var sessionKey [32]byte
seshConfig := seshConfig rand.Read(sessionKey[:])
t.Run(seshType, func(t *testing.T) { obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
sesh := MakeSession(0, seshConfig) seshConfigOrdered.Obfuscator = obfuscator
sesh.AddConnection(connutil.Discard())
t.Run("read after deadline set", func(t *testing.T) { testReadDeadline := func(sesh *Session) {
stream, _ := sesh.OpenStream() t.Run("read after deadline set", func(t *testing.T) {
_ = stream.SetReadDeadline(time.Now().Add(-1 * time.Second)) stream, _ := sesh.OpenStream()
_, err := stream.Read(make([]byte, 1)) _ = stream.SetReadDeadline(time.Now().Add(-1 * time.Second))
if err != ErrTimeout { _, err := stream.Read(make([]byte, 1))
t.Errorf("expecting error %v, got %v", ErrTimeout, err) if err != ErrTimeout {
} t.Errorf("expecting error %v, got %v", ErrTimeout, err)
}) }
})
t.Run("unblock when deadline passed", func(t *testing.T) { t.Run("unblock when deadline passed", func(t *testing.T) {
stream, _ := sesh.OpenStream() stream, _ := sesh.OpenStream()
done := make(chan struct{}) done := make(chan struct{})
go func() { go func() {
_, _ = stream.Read(make([]byte, 1)) _, _ = stream.Read(make([]byte, 1))
done <- struct{}{} done <- struct{}{}
}() }()
_ = stream.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) _ = stream.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
select { select {
case <-done: case <-done:
return return
case <-time.After(500 * time.Millisecond): case <-time.After(500 * time.Millisecond):
t.Error("Read did not unblock after deadline has passed") t.Error("Read did not unblock after deadline has passed")
} }
})
}) })
} }
sesh := MakeSession(0, seshConfigOrdered)
sesh.AddConnection(connutil.Discard())
testReadDeadline(sesh)
sesh = MakeSession(0, seshConfigUnordered)
sesh.AddConnection(connutil.Discard())
testReadDeadline(sesh)
} }
func TestSession_timeoutAfter(t *testing.T) { func TestSession_timeoutAfter(t *testing.T) {
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
obfuscator, _ := MakeObfuscator(EncryptionMethodPlain, sessionKey) obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfigOrdered.Obfuscator = obfuscator
for seshType, seshConfig := range seshConfigs { seshConfigOrdered.InactivityTimeout = 100 * time.Millisecond
seshConfig := seshConfig sesh := MakeSession(0, seshConfigOrdered)
t.Run(seshType, func(t *testing.T) { time.Sleep(200 * time.Millisecond)
seshConfig.Obfuscator = obfuscator if !sesh.IsClosed() {
seshConfig.InactivityTimeout = 100 * time.Millisecond t.Error("session should have timed out")
sesh := MakeSession(0, seshConfig)
assert.Eventually(t, func() bool {
return sesh.IsClosed()
}, 5*seshConfig.InactivityTimeout, seshConfig.InactivityTimeout, "session should have timed out")
})
} }
} }
func BenchmarkRecvDataFromRemote(b *testing.B) { func BenchmarkRecvDataFromRemote_Ordered(b *testing.B) {
testPayloadLen := 1024
testPayload := make([]byte, testPayloadLen) testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload) rand.Read(testPayload)
f := Frame{ f := &Frame{
1, 1,
0, 0,
0, 0,
testPayload, testPayload,
} }
obfsBuf := make([]byte, 17000)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
const maxIter = 500_000 // run with -benchtime 500000x to avoid index out of bounds panic b.Run("plain", func(b *testing.B) {
for name, ep := range encryptionMethods { obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
ep := ep seshConfigOrdered.Obfuscator = obfuscator
b.Run(name, func(b *testing.B) { sesh := MakeSession(0, seshConfigOrdered)
for seshType, seshConfig := range seshConfigs { n, _ := sesh.Obfs(f, obfsBuf, 0)
b.Run(seshType, func(b *testing.B) {
f := f
seshConfig.Obfuscator, _ = MakeObfuscator(ep, sessionKey)
sesh := MakeSession(0, seshConfig)
go func() { b.SetBytes(int64(len(f.Payload)))
stream, _ := sesh.Accept() b.ResetTimer()
io.Copy(ioutil.Discard, stream) for i := 0; i < b.N; i++ {
}() sesh.recvDataFromRemote(obfsBuf[:n])
}
})
binaryFrames := [maxIter][]byte{} b.Run("aes-gcm", func(b *testing.B) {
for i := 0; i < maxIter; i++ { obfuscator, _ := MakeObfuscator(E_METHOD_AES_GCM, sessionKey)
obfsBuf := make([]byte, obfsBufLen) seshConfigOrdered.Obfuscator = obfuscator
n, _ := sesh.obfuscate(&f, obfsBuf, 0) sesh := MakeSession(0, seshConfigOrdered)
binaryFrames[i] = obfsBuf[:n] n, _ := sesh.Obfs(f, obfsBuf, 0)
f.Seq++
}
b.SetBytes(int64(len(f.Payload))) b.SetBytes(int64(len(f.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
sesh.recvDataFromRemote(binaryFrames[i]) sesh.recvDataFromRemote(obfsBuf[:n])
} }
}) })
}
}) b.Run("chacha20-poly1305", func(b *testing.B) {
} obfuscator, _ := MakeObfuscator(E_METHOD_CHACHA20_POLY1305, sessionKey)
} seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfigOrdered)
func BenchmarkMultiStreamWrite(b *testing.B) { n, _ := sesh.Obfs(f, obfsBuf, 0)
var sessionKey [32]byte
rand.Read(sessionKey[:]) b.SetBytes(int64(len(f.Payload)))
b.ResetTimer()
testPayload := make([]byte, testPayloadLen) for i := 0; i < b.N; i++ {
sesh.recvDataFromRemote(obfsBuf[:n])
for name, ep := range encryptionMethods { }
b.Run(name, func(b *testing.B) { })
for seshType, seshConfig := range seshConfigs {
b.Run(seshType, func(b *testing.B) {
seshConfig.Obfuscator, _ = MakeObfuscator(ep, sessionKey)
sesh := MakeSession(0, seshConfig)
sesh.AddConnection(connutil.Discard())
b.ResetTimer()
b.SetBytes(testPayloadLen)
b.RunParallel(func(pb *testing.PB) {
stream, _ := sesh.OpenStream()
for pb.Next() {
stream.Write(testPayload)
}
})
})
}
})
}
}
func BenchmarkLatency(b *testing.B) {
var sessionKey [32]byte
rand.Read(sessionKey[:])
for name, ep := range encryptionMethods {
b.Run(name, func(b *testing.B) {
for seshType, seshConfig := range seshConfigs {
b.Run(seshType, func(b *testing.B) {
seshConfig.Obfuscator, _ = MakeObfuscator(ep, sessionKey)
clientSesh := MakeSession(0, seshConfig)
serverSesh := MakeSession(0, seshConfig)
c, s := net.Pipe()
clientSesh.AddConnection(c)
serverSesh.AddConnection(s)
buf := make([]byte, 64)
clientStream, _ := clientSesh.OpenStream()
clientStream.Write(buf)
serverStream, _ := serverSesh.Accept()
io.ReadFull(serverStream, buf)
b.ResetTimer()
for i := 0; i < b.N; i++ {
clientStream.Write(buf)
io.ReadFull(serverStream, buf)
}
})
}
})
}
} }

View File

@ -6,10 +6,9 @@ import (
"net" "net"
"time" "time"
log "github.com/sirupsen/logrus"
"sync" "sync"
"sync/atomic" "sync/atomic"
log "github.com/sirupsen/logrus"
) )
var ErrBrokenStream = errors.New("broken stream") var ErrBrokenStream = errors.New("broken stream")
@ -25,41 +24,46 @@ type Stream struct {
session *Session session *Session
// a buffer (implemented as an asynchronous buffered pipe) to put data we've received from recvFrame but hasn't // a buffer (implemented as an asynchronous buffered pipe) to put data we've received from recvFrame but hasn't
// been read by the consumer through Read or WriteTo. // been read by the consumer through Read or WriteTo
recvBuf recvBuffer recvBuf recvBuffer
writingM sync.Mutex nextSendSeq uint64
writingFrame Frame // we do the allocation here to save repeated allocations in Write and ReadFrom
writingM sync.Mutex
// atomic // atomic
closed uint32 closed uint32
// lazy allocation for obfsBuf. This is desirable because obfsBuf is only used when data is sent from
// the stream (through Write or ReadFrom). Some streams never send data so eager allocation will waste
// memory
allocIdempot sync.Once
// obfuscation happens in this buffer
obfsBuf []byte
// When we want order guarantee (i.e. session.Unordered is false), // When we want order guarantee (i.e. session.Unordered is false),
// we assign each stream a fixed underlying connection. // we assign each stream a fixed underlying connection.
// If the underlying connections the session uses provide ordering guarantee (most likely TCP), // If the underlying connections the session uses provide ordering guarantee (most likely TCP),
// recvBuffer (implemented by streamBuffer under ordered mode) will not receive out-of-order packets // recvBuffer (implemented by streamBuffer under ordered mode) will not receive out-of-order packets
// so it won't have to use its priority queue to sort it. // so it won't have to use its priority queue to sort it.
// This is not used in unordered connection mode // This is not used in unordered connection mode
assignedConn net.Conn assignedConnId uint32
readFromTimeout time.Duration readFromTimeout time.Duration
} }
func makeStream(sesh *Session, id uint32) *Stream { func makeStream(sesh *Session, id uint32) *Stream {
var recvBuf recvBuffer
if sesh.Unordered {
recvBuf = NewDatagramBufferedPipe()
} else {
recvBuf = NewStreamBuffer()
}
stream := &Stream{ stream := &Stream{
id: id, id: id,
session: sesh, session: sesh,
writingFrame: Frame{ recvBuf: recvBuf,
StreamID: id,
Seq: 0,
Closing: closingNothing,
},
}
if sesh.Unordered {
stream.recvBuf = NewDatagramBufferedPipe()
} else {
stream.recvBuf = NewStreamBuffer()
} }
return stream return stream
@ -68,7 +72,7 @@ func makeStream(sesh *Session, id uint32) *Stream {
func (s *Stream) isClosed() bool { return atomic.LoadUint32(&s.closed) == 1 } func (s *Stream) isClosed() bool { return atomic.LoadUint32(&s.closed) == 1 }
// receive a readily deobfuscated Frame so its payload can later be Read // receive a readily deobfuscated Frame so its payload can later be Read
func (s *Stream) recvFrame(frame *Frame) error { func (s *Stream) recvFrame(frame Frame) error {
toBeClosed, err := s.recvBuf.Write(frame) toBeClosed, err := s.recvBuf.Write(frame)
if toBeClosed { if toBeClosed {
err = s.passiveClose() err = s.passiveClose()
@ -96,14 +100,26 @@ func (s *Stream) Read(buf []byte) (n int, err error) {
return return
} }
func (s *Stream) obfuscateAndSend(buf []byte, payloadOffsetInBuf int) error { // WriteTo continuously write data Stream has received into the writer w.
cipherTextLen, err := s.session.obfuscate(&s.writingFrame, buf, payloadOffsetInBuf) func (s *Stream) WriteTo(w io.Writer) (int64, error) {
s.writingFrame.Seq++ // will keep writing until the underlying buffer is closed
n, err := s.recvBuf.WriteTo(w)
log.Tracef("%v read from stream %v with err %v", n, s.id, err)
if err == io.EOF {
return n, ErrBrokenStream
}
return n, nil
}
func (s *Stream) obfuscateAndSend(f *Frame, payloadOffsetInObfsBuf int) error {
var cipherTextLen int
cipherTextLen, err := s.session.Obfs(f, s.obfsBuf, payloadOffsetInObfsBuf)
if err != nil { if err != nil {
return err return err
} }
_, err = s.session.sb.send(buf[:cipherTextLen], &s.assignedConn) _, err = s.session.sb.send(s.obfsBuf[:cipherTextLen], &s.assignedConnId)
log.Tracef("%v sent to remote through stream %v with err %v. seq: %v", len(f.Payload), s.id, err, f.Seq)
if err != nil { if err != nil {
if err == errBrokenSwitchboard { if err == errBrokenSwitchboard {
s.session.SetTerminalMsg(err.Error()) s.session.SetTerminalMsg(err.Error())
@ -122,6 +138,9 @@ func (s *Stream) Write(in []byte) (n int, err error) {
return 0, ErrBrokenStream return 0, ErrBrokenStream
} }
if s.obfsBuf == nil {
s.obfsBuf = make([]byte, s.session.StreamSendBufferSize)
}
for n < len(in) { for n < len(in) {
var framePayload []byte var framePayload []byte
if len(in)-n <= s.session.maxStreamUnitWrite { if len(in)-n <= s.session.maxStreamUnitWrite {
@ -136,10 +155,14 @@ func (s *Stream) Write(in []byte) (n int, err error) {
} }
framePayload = in[n : s.session.maxStreamUnitWrite+n] framePayload = in[n : s.session.maxStreamUnitWrite+n]
} }
s.writingFrame.Payload = framePayload f := &Frame{
buf := s.session.streamObfsBufPool.Get().(*[]byte) StreamID: s.id,
err = s.obfuscateAndSend(*buf, 0) Seq: s.nextSendSeq,
s.session.streamObfsBufPool.Put(buf) Closing: C_NOOP,
Payload: framePayload,
}
s.nextSendSeq++
err = s.obfuscateAndSend(f, 0)
if err != nil { if err != nil {
return return
} }
@ -151,6 +174,9 @@ func (s *Stream) Write(in []byte) (n int, err error) {
// ReadFrom continuously read data from r and send it off, until either r returns error or nothing has been read // ReadFrom continuously read data from r and send it off, until either r returns error or nothing has been read
// for readFromTimeout amount of time // for readFromTimeout amount of time
func (s *Stream) ReadFrom(r io.Reader) (n int64, err error) { func (s *Stream) ReadFrom(r io.Reader) (n int64, err error) {
if s.obfsBuf == nil {
s.obfsBuf = make([]byte, s.session.StreamSendBufferSize)
}
for { for {
if s.readFromTimeout != 0 { if s.readFromTimeout != 0 {
if rder, ok := r.(net.Conn); !ok { if rder, ok := r.(net.Conn); !ok {
@ -159,23 +185,24 @@ func (s *Stream) ReadFrom(r io.Reader) (n int64, err error) {
rder.SetReadDeadline(time.Now().Add(s.readFromTimeout)) rder.SetReadDeadline(time.Now().Add(s.readFromTimeout))
} }
} }
buf := s.session.streamObfsBufPool.Get().(*[]byte) read, er := r.Read(s.obfsBuf[HEADER_LEN : HEADER_LEN+s.session.maxStreamUnitWrite])
read, er := r.Read((*buf)[frameHeaderLength : frameHeaderLength+s.session.maxStreamUnitWrite])
if er != nil { if er != nil {
return n, er return n, er
} }
// the above read may have been unblocked by another goroutine calling stream.Close(), so we need
// to check that here
if s.isClosed() { if s.isClosed() {
return n, ErrBrokenStream return n, ErrBrokenStream
} }
s.writingM.Lock() s.writingM.Lock()
s.writingFrame.Payload = (*buf)[frameHeaderLength : frameHeaderLength+read] f := &Frame{
err = s.obfuscateAndSend(*buf, frameHeaderLength) StreamID: s.id,
Seq: s.nextSendSeq,
Closing: C_NOOP,
Payload: s.obfsBuf[HEADER_LEN : HEADER_LEN+read],
}
s.nextSendSeq++
err = s.obfuscateAndSend(f, HEADER_LEN)
s.writingM.Unlock() s.writingM.Unlock()
s.session.streamObfsBufPool.Put(buf)
if err != nil { if err != nil {
return return
@ -199,6 +226,7 @@ func (s *Stream) Close() error {
func (s *Stream) LocalAddr() net.Addr { return s.session.addrs.Load().([]net.Addr)[0] } func (s *Stream) LocalAddr() net.Addr { return s.session.addrs.Load().([]net.Addr)[0] }
func (s *Stream) RemoteAddr() net.Addr { return s.session.addrs.Load().([]net.Addr)[1] } func (s *Stream) RemoteAddr() net.Addr { return s.session.addrs.Load().([]net.Addr)[1] }
func (s *Stream) SetWriteToTimeout(d time.Duration) { s.recvBuf.SetWriteToTimeout(d) }
func (s *Stream) SetReadDeadline(t time.Time) error { s.recvBuf.SetReadDeadline(t); return nil } func (s *Stream) SetReadDeadline(t time.Time) error { s.recvBuf.SetReadDeadline(t); return nil }
func (s *Stream) SetReadFromTimeout(d time.Duration) { s.readFromTimeout = d } func (s *Stream) SetReadFromTimeout(d time.Duration) { s.readFromTimeout = d }

View File

@ -13,6 +13,7 @@ package multiplex
import ( import (
"container/heap" "container/heap"
"fmt" "fmt"
"io"
"sync" "sync"
"time" "time"
) )
@ -62,12 +63,12 @@ func NewStreamBuffer() *streamBuffer {
return sb return sb
} }
func (sb *streamBuffer) Write(f *Frame) (toBeClosed bool, err error) { func (sb *streamBuffer) Write(f Frame) (toBeClosed bool, err error) {
sb.recvM.Lock() sb.recvM.Lock()
defer sb.recvM.Unlock() defer sb.recvM.Unlock()
// when there'fs no ooo packages in heap and we receive the next package in order // when there'fs no ooo packages in heap and we receive the next package in order
if len(sb.sh) == 0 && f.Seq == sb.nextRecvSeq { if len(sb.sh) == 0 && f.Seq == sb.nextRecvSeq {
if f.Closing != closingNothing { if f.Closing != C_NOOP {
return true, nil return true, nil
} else { } else {
sb.buf.Write(f.Payload) sb.buf.Write(f.Payload)
@ -80,14 +81,11 @@ func (sb *streamBuffer) Write(f *Frame) (toBeClosed bool, err error) {
return false, fmt.Errorf("seq %v is smaller than nextRecvSeq %v", f.Seq, sb.nextRecvSeq) return false, fmt.Errorf("seq %v is smaller than nextRecvSeq %v", f.Seq, sb.nextRecvSeq)
} }
saved := *f heap.Push(&sb.sh, &f)
saved.Payload = make([]byte, len(f.Payload))
copy(saved.Payload, f.Payload)
heap.Push(&sb.sh, &saved)
// Keep popping from the heap until empty or to the point that the wanted seq was not received // Keep popping from the heap until empty or to the point that the wanted seq was not received
for len(sb.sh) > 0 && sb.sh[0].Seq == sb.nextRecvSeq { for len(sb.sh) > 0 && sb.sh[0].Seq == sb.nextRecvSeq {
f = heap.Pop(&sb.sh).(*Frame) f = *heap.Pop(&sb.sh).(*Frame)
if f.Closing != closingNothing { if f.Closing != C_NOOP {
return true, nil return true, nil
} else { } else {
sb.buf.Write(f.Payload) sb.buf.Write(f.Payload)
@ -101,6 +99,10 @@ func (sb *streamBuffer) Read(buf []byte) (int, error) {
return sb.buf.Read(buf) return sb.buf.Read(buf)
} }
func (sb *streamBuffer) WriteTo(w io.Writer) (int64, error) {
return sb.buf.WriteTo(w)
}
func (sb *streamBuffer) Close() error { func (sb *streamBuffer) Close() error {
sb.recvM.Lock() sb.recvM.Lock()
defer sb.recvM.Unlock() defer sb.recvM.Unlock()
@ -108,4 +110,5 @@ func (sb *streamBuffer) Close() error {
return sb.buf.Close() return sb.buf.Close()
} }
func (sb *streamBuffer) SetReadDeadline(t time.Time) { sb.buf.SetReadDeadline(t) } func (sb *streamBuffer) SetReadDeadline(t time.Time) { sb.buf.SetReadDeadline(t) }
func (sb *streamBuffer) SetWriteToTimeout(d time.Duration) { sb.buf.SetWriteToTimeout(d) }

View File

@ -3,7 +3,6 @@ package multiplex
import ( import (
"encoding/binary" "encoding/binary"
"io" "io"
//"log" //"log"
"sort" "sort"
"testing" "testing"
@ -21,10 +20,11 @@ func TestRecvNewFrame(t *testing.T) {
for _, n := range set { for _, n := range set {
bu64 := make([]byte, 8) bu64 := make([]byte, 8)
binary.BigEndian.PutUint64(bu64, n) binary.BigEndian.PutUint64(bu64, n)
sb.Write(&Frame{ frame := Frame{
Seq: n, Seq: n,
Payload: bu64, Payload: bu64,
}) }
sb.Write(frame)
} }
var sortedResult []uint64 var sortedResult []uint64
@ -80,7 +80,7 @@ func TestStreamBuffer_RecvThenClose(t *testing.T) {
Closing: 0, Closing: 0,
Payload: testData, Payload: testData,
} }
sb.Write(&testFrame) sb.Write(testFrame)
sb.Close() sb.Close()
readBuf := make([]byte, testDataLen) readBuf := make([]byte, testDataLen)

View File

@ -11,20 +11,18 @@ import (
// The point of a streamBufferedPipe is that Read() will block until data is available // The point of a streamBufferedPipe is that Read() will block until data is available
type streamBufferedPipe struct { type streamBufferedPipe struct {
// only alloc when on first Read or Write
buf *bytes.Buffer buf *bytes.Buffer
closed bool closed bool
rwCond *sync.Cond rwCond *sync.Cond
rDeadline time.Time rDeadline time.Time
wtTimeout time.Duration wtTimeout time.Duration
timeoutTimer *time.Timer
} }
func NewStreamBufferedPipe() *streamBufferedPipe { func NewStreamBufferedPipe() *streamBufferedPipe {
p := &streamBufferedPipe{ p := &streamBufferedPipe{
rwCond: sync.NewCond(&sync.Mutex{}), rwCond: sync.NewCond(&sync.Mutex{}),
buf: new(bytes.Buffer),
} }
return p return p
} }
@ -32,24 +30,23 @@ func NewStreamBufferedPipe() *streamBufferedPipe {
func (p *streamBufferedPipe) Read(target []byte) (int, error) { func (p *streamBufferedPipe) Read(target []byte) (int, error) {
p.rwCond.L.Lock() p.rwCond.L.Lock()
defer p.rwCond.L.Unlock() defer p.rwCond.L.Unlock()
if p.buf == nil {
p.buf = new(bytes.Buffer)
}
for { for {
if p.closed && p.buf.Len() == 0 { if p.closed && p.buf.Len() == 0 {
return 0, io.EOF return 0, io.EOF
} }
if !p.rDeadline.IsZero() {
hasRDeadline := !p.rDeadline.IsZero() d := time.Until(p.rDeadline)
if hasRDeadline { if d <= 0 {
if time.Until(p.rDeadline) <= 0 {
return 0, ErrTimeout return 0, ErrTimeout
} }
time.AfterFunc(d, p.rwCond.Broadcast)
} }
if p.buf.Len() > 0 { if p.buf.Len() > 0 {
break break
} }
if hasRDeadline {
p.broadcastAfter(time.Until(p.rDeadline))
}
p.rwCond.Wait() p.rwCond.Wait()
} }
n, err := p.buf.Read(target) n, err := p.buf.Read(target)
@ -58,9 +55,50 @@ func (p *streamBufferedPipe) Read(target []byte) (int, error) {
return n, err return n, err
} }
func (p *streamBufferedPipe) WriteTo(w io.Writer) (n int64, err error) {
p.rwCond.L.Lock()
defer p.rwCond.L.Unlock()
if p.buf == nil {
p.buf = new(bytes.Buffer)
}
for {
if p.closed && p.buf.Len() == 0 {
return 0, io.EOF
}
if !p.rDeadline.IsZero() {
d := time.Until(p.rDeadline)
if d <= 0 {
return 0, ErrTimeout
}
if p.wtTimeout == 0 {
// if there hasn't been a scheduled broadcast
time.AfterFunc(d, p.rwCond.Broadcast)
}
}
if p.wtTimeout != 0 {
p.rDeadline = time.Now().Add(p.wtTimeout)
time.AfterFunc(p.wtTimeout, p.rwCond.Broadcast)
}
if p.buf.Len() > 0 {
written, er := p.buf.WriteTo(w)
n += written
if er != nil {
p.rwCond.Broadcast()
return n, er
}
p.rwCond.Broadcast()
} else {
p.rwCond.Wait()
}
}
}
func (p *streamBufferedPipe) Write(input []byte) (int, error) { func (p *streamBufferedPipe) Write(input []byte) (int, error) {
p.rwCond.L.Lock() p.rwCond.L.Lock()
defer p.rwCond.L.Unlock() defer p.rwCond.L.Unlock()
if p.buf == nil {
p.buf = new(bytes.Buffer)
}
for { for {
if p.closed { if p.closed {
return 0, io.ErrClosedPipe return 0, io.ErrClosedPipe
@ -94,9 +132,10 @@ func (p *streamBufferedPipe) SetReadDeadline(t time.Time) {
p.rwCond.Broadcast() p.rwCond.Broadcast()
} }
func (p *streamBufferedPipe) broadcastAfter(d time.Duration) { func (p *streamBufferedPipe) SetWriteToTimeout(d time.Duration) {
if p.timeoutTimer != nil { p.rwCond.L.Lock()
p.timeoutTimer.Stop() defer p.rwCond.L.Unlock()
}
p.timeoutTimer = time.AfterFunc(d, p.rwCond.Broadcast) p.wtTimeout = d
p.rwCond.Broadcast()
} }

View File

@ -1,43 +1,94 @@
package multiplex package multiplex
import ( import (
"bytes"
"math/rand" "math/rand"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
) )
const readBlockTime = 500 * time.Millisecond
func TestPipeRW(t *testing.T) { func TestPipeRW(t *testing.T) {
pipe := NewStreamBufferedPipe() pipe := NewStreamBufferedPipe()
b := []byte{0x01, 0x02, 0x03} b := []byte{0x01, 0x02, 0x03}
n, err := pipe.Write(b) n, err := pipe.Write(b)
assert.NoError(t, err, "simple write") if n != len(b) {
assert.Equal(t, len(b), n, "number of bytes written") t.Error(
"For", "number of bytes written",
"expecting", len(b),
"got", n,
)
return
}
if err != nil {
t.Error(
"For", "simple write",
"expecting", "nil error",
"got", err,
)
return
}
b2 := make([]byte, len(b)) b2 := make([]byte, len(b))
n, err = pipe.Read(b2) n, err = pipe.Read(b2)
assert.NoError(t, err, "simple read") if n != len(b) {
assert.Equal(t, len(b), n, "number of bytes read") t.Error(
"For", "number of bytes read",
"expecting", len(b),
"got", n,
)
return
}
if err != nil {
t.Error(
"For", "simple read",
"expecting", "nil error",
"got", err,
)
return
}
if !bytes.Equal(b, b2) {
t.Error(
"For", "simple read",
"expecting", b,
"got", b2,
)
}
assert.Equal(t, b, b2)
} }
func TestReadBlock(t *testing.T) { func TestReadBlock(t *testing.T) {
pipe := NewStreamBufferedPipe() pipe := NewStreamBufferedPipe()
b := []byte{0x01, 0x02, 0x03} b := []byte{0x01, 0x02, 0x03}
go func() { go func() {
time.Sleep(readBlockTime) time.Sleep(100 * time.Millisecond)
pipe.Write(b) pipe.Write(b)
}() }()
b2 := make([]byte, len(b)) b2 := make([]byte, len(b))
n, err := pipe.Read(b2) n, err := pipe.Read(b2)
assert.NoError(t, err, "blocked read") if n != len(b) {
assert.Equal(t, len(b), n, "number of bytes read after block") t.Error(
"For", "number of bytes read after block",
assert.Equal(t, b, b2) "expecting", len(b),
"got", n,
)
return
}
if err != nil {
t.Error(
"For", "blocked read",
"expecting", "nil error",
"got", err,
)
return
}
if !bytes.Equal(b, b2) {
t.Error(
"For", "blocked read",
"expecting", b,
"got", b2,
)
return
}
} }
func TestPartialRead(t *testing.T) { func TestPartialRead(t *testing.T) {
@ -46,17 +97,54 @@ func TestPartialRead(t *testing.T) {
pipe.Write(b) pipe.Write(b)
b1 := make([]byte, 1) b1 := make([]byte, 1)
n, err := pipe.Read(b1) n, err := pipe.Read(b1)
assert.NoError(t, err, "partial read of 1") if n != len(b1) {
assert.Equal(t, len(b1), n, "number of bytes in partial read of 1") t.Error(
"For", "number of bytes in partial read of 1",
assert.Equal(t, b[0], b1[0]) "expecting", len(b1),
"got", n,
)
return
}
if err != nil {
t.Error(
"For", "partial read of 1",
"expecting", "nil error",
"got", err,
)
return
}
if b1[0] != b[0] {
t.Error(
"For", "partial read of 1",
"expecting", b[0],
"got", b1[0],
)
}
b2 := make([]byte, 2) b2 := make([]byte, 2)
n, err = pipe.Read(b2) n, err = pipe.Read(b2)
assert.NoError(t, err, "partial read of 2") if n != len(b2) {
assert.Equal(t, len(b2), n, "number of bytes in partial read of 2") t.Error(
"For", "number of bytes in partial read of 2",
assert.Equal(t, b[1:], b2) "expecting", len(b2),
"got", n,
)
}
if err != nil {
t.Error(
"For", "partial read of 2",
"expecting", "nil error",
"got", err,
)
return
}
if !bytes.Equal(b[1:], b2) {
t.Error(
"For", "partial read of 2",
"expecting", b[1:],
"got", b2,
)
return
}
} }
func TestReadAfterClose(t *testing.T) { func TestReadAfterClose(t *testing.T) {
@ -66,10 +154,29 @@ func TestReadAfterClose(t *testing.T) {
b2 := make([]byte, len(b)) b2 := make([]byte, len(b))
pipe.Close() pipe.Close()
n, err := pipe.Read(b2) n, err := pipe.Read(b2)
assert.NoError(t, err, "simple read") if n != len(b) {
assert.Equal(t, len(b), n, "number of bytes read") t.Error(
"For", "number of bytes read",
assert.Equal(t, b, b2) "expecting", len(b),
"got", n,
)
}
if err != nil {
t.Error(
"For", "simple read",
"expecting", "nil error",
"got", err,
)
return
}
if !bytes.Equal(b, b2) {
t.Error(
"For", "simple read",
"expecting", b,
"got", b2,
)
return
}
} }
func BenchmarkBufferedPipe_RW(b *testing.B) { func BenchmarkBufferedPipe_RW(b *testing.B) {

View File

@ -2,14 +2,13 @@ package multiplex
import ( import (
"bytes" "bytes"
"github.com/cbeuw/Cloak/internal/common"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"testing" "testing"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/stretchr/testify/assert"
"github.com/cbeuw/connutil" "github.com/cbeuw/connutil"
) )
@ -37,10 +36,9 @@ func BenchmarkStream_Write_Ordered(b *testing.B) {
testData := make([]byte, testDataLen) testData := make([]byte, testDataLen)
rand.Read(testData) rand.Read(testData)
eMethods := map[string]byte{ eMethods := map[string]byte{
"plain": EncryptionMethodPlain, "plain": E_METHOD_PLAIN,
"chacha20-poly1305": EncryptionMethodChaha20Poly1305, "chacha20-poly1305": E_METHOD_CHACHA20_POLY1305,
"aes-256-gcm": EncryptionMethodAES256GCM, "aes-gcm": E_METHOD_AES_GCM,
"aes-128-gcm": EncryptionMethodAES128GCM,
} }
for name, method := range eMethods { for name, method := range eMethods {
@ -57,11 +55,62 @@ func BenchmarkStream_Write_Ordered(b *testing.B) {
} }
} }
/*
func BenchmarkStream_Read_Ordered(b *testing.B) {
var sessionKey [32]byte
rand.Read(sessionKey[:])
sesh := setupSesh(false, sessionKey)
testPayload := make([]byte, payloadLen)
rand.Read(testPayload)
f := &Frame{
1,
0,
0,
testPayload,
}
obfsBuf := make([]byte, 17000)
l, _ := net.Listen("tcp", "127.0.0.1:0")
go func() {
// potentially bottlenecked here rather than the actual stream read throughput
conn, _ := net.Dial("tcp", l.Addr().String())
for {
i, _ := sesh.Obfs(f, obfsBuf)
f.Seq += 1
_, err := conn.Write(obfsBuf[:i])
if err != nil {
b.Error("cannot write to connection", err)
}
}
}()
conn, _ := l.Accept()
sesh.AddConnection(conn)
stream, err := sesh.Accept()
if err != nil {
b.Error("failed to accept stream", err)
}
//time.Sleep(5*time.Second) // wait for buffer to fill up
readBuf := make([]byte, payloadLen)
b.SetBytes(payloadLen)
b.ResetTimer()
for j := 0; j < b.N; j++ {
stream.Read(readBuf)
}
}
*/
func TestStream_Write(t *testing.T) { func TestStream_Write(t *testing.T) {
hole := connutil.Discard() hole := connutil.Discard()
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
sesh := setupSesh(false, sessionKey, EncryptionMethodPlain) sesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
sesh.AddConnection(hole) sesh.AddConnection(hole)
testData := make([]byte, payloadLen) testData := make([]byte, payloadLen)
rand.Read(testData) rand.Read(testData)
@ -80,8 +129,8 @@ func TestStream_WriteSync(t *testing.T) {
// Close calls made after write MUST have a higher seq // Close calls made after write MUST have a higher seq
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
clientSesh := setupSesh(false, sessionKey, EncryptionMethodPlain) clientSesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
serverSesh := setupSesh(false, sessionKey, EncryptionMethodPlain) serverSesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
w, r := connutil.AsyncPipe() w, r := connutil.AsyncPipe()
clientSesh.AddConnection(common.NewTLSConn(w)) clientSesh.AddConnection(common.NewTLSConn(w))
serverSesh.AddConnection(common.NewTLSConn(r)) serverSesh.AddConnection(common.NewTLSConn(r))
@ -136,13 +185,13 @@ func TestStream_Close(t *testing.T) {
} }
t.Run("active closing", func(t *testing.T) { t.Run("active closing", func(t *testing.T) {
sesh := setupSesh(false, sessionKey, EncryptionMethodPlain) sesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
rawConn, rawWritingEnd := connutil.AsyncPipe() rawConn, rawWritingEnd := connutil.AsyncPipe()
sesh.AddConnection(common.NewTLSConn(rawConn)) sesh.AddConnection(common.NewTLSConn(rawConn))
writingEnd := common.NewTLSConn(rawWritingEnd) writingEnd := common.NewTLSConn(rawWritingEnd)
obfsBuf := make([]byte, 512) obfsBuf := make([]byte, 512)
i, _ := sesh.obfuscate(dataFrame, obfsBuf, 0) i, _ := sesh.Obfs(dataFrame, obfsBuf, 0)
_, err := writingEnd.Write(obfsBuf[:i]) _, err := writingEnd.Write(obfsBuf[:i])
if err != nil { if err != nil {
t.Error("failed to write from remote end") t.Error("failed to write from remote end")
@ -152,40 +201,35 @@ func TestStream_Close(t *testing.T) {
t.Error("failed to accept stream", err) t.Error("failed to accept stream", err)
return return
} }
time.Sleep(500 * time.Millisecond)
err = stream.Close() err = stream.Close()
if err != nil { if err != nil {
t.Error("failed to actively close stream", err) t.Error("failed to actively close stream", err)
return return
} }
sesh.streamsM.Lock() if sI, _ := sesh.streams.Load(stream.(*Stream).id); sI != nil {
if s, _ := sesh.streams[stream.(*Stream).id]; s != nil {
sesh.streamsM.Unlock()
t.Error("stream still exists") t.Error("stream still exists")
return return
} }
sesh.streamsM.Unlock()
readBuf := make([]byte, len(testPayload)) readBuf := make([]byte, len(testPayload))
_, err = io.ReadFull(stream, readBuf) _, err = io.ReadFull(stream, readBuf)
if err != nil { if err != nil {
t.Errorf("cannot read resiual data: %v", err) t.Errorf("can't read residual data %v", err)
} }
if !bytes.Equal(readBuf, testPayload) { if !bytes.Equal(readBuf, testPayload) {
t.Errorf("read wrong data") t.Errorf("read wrong data")
} }
}) })
t.Run("passive closing", func(t *testing.T) { t.Run("passive closing", func(t *testing.T) {
sesh := setupSesh(false, sessionKey, EncryptionMethodPlain) sesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
rawConn, rawWritingEnd := connutil.AsyncPipe() rawConn, rawWritingEnd := connutil.AsyncPipe()
sesh.AddConnection(common.NewTLSConn(rawConn)) sesh.AddConnection(common.NewTLSConn(rawConn))
writingEnd := common.NewTLSConn(rawWritingEnd) writingEnd := common.NewTLSConn(rawWritingEnd)
obfsBuf := make([]byte, 512) obfsBuf := make([]byte, 512)
i, err := sesh.obfuscate(dataFrame, obfsBuf, 0) i, err := sesh.Obfs(dataFrame, obfsBuf, 0)
if err != nil { if err != nil {
t.Errorf("failed to obfuscate frame %v", err) t.Errorf("failed to obfuscate frame %v", err)
} }
@ -203,11 +247,11 @@ func TestStream_Close(t *testing.T) {
closingFrame := &Frame{ closingFrame := &Frame{
1, 1,
dataFrame.Seq + 1, dataFrame.Seq + 1,
closingStream, C_STREAM,
testPayload, testPayload,
} }
i, err = sesh.obfuscate(closingFrame, obfsBuf, 0) i, err = sesh.Obfs(closingFrame, obfsBuf, 0)
if err != nil { if err != nil {
t.Errorf("failed to obfuscate frame %v", err) t.Errorf("failed to obfuscate frame %v", err)
} }
@ -219,11 +263,11 @@ func TestStream_Close(t *testing.T) {
closingFrameDup := &Frame{ closingFrameDup := &Frame{
1, 1,
dataFrame.Seq + 2, dataFrame.Seq + 2,
closingStream, C_STREAM,
testPayload, testPayload,
} }
i, err = sesh.obfuscate(closingFrameDup, obfsBuf, 0) i, err = sesh.Obfs(closingFrameDup, obfsBuf, 0)
if err != nil { if err != nil {
t.Errorf("failed to obfuscate frame %v", err) t.Errorf("failed to obfuscate frame %v", err)
} }
@ -237,13 +281,11 @@ func TestStream_Close(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("can't read residual data %v", err) t.Errorf("can't read residual data %v", err)
} }
time.Sleep(100 * time.Millisecond)
assert.Eventually(t, func() bool { if sI, _ := sesh.streams.Load(stream.(*Stream).id); sI != nil {
sesh.streamsM.Lock() t.Error("stream still exists")
s, _ := sesh.streams[stream.(*Stream).id] return
sesh.streamsM.Unlock() }
return s == nil
}, time.Second, 10*time.Millisecond, "streams still exists")
}) })
} }
@ -264,18 +306,19 @@ func TestStream_Read(t *testing.T) {
} }
var streamID uint32 var streamID uint32
buf := make([]byte, 10)
obfsBuf := make([]byte, 512)
for name, unordered := range seshes { for name, unordered := range seshes {
sesh := setupSesh(unordered, emptyKey, EncryptionMethodPlain) sesh := setupSesh(unordered, emptyKey, E_METHOD_PLAIN)
rawConn, rawWritingEnd := connutil.AsyncPipe() rawConn, rawWritingEnd := connutil.AsyncPipe()
sesh.AddConnection(common.NewTLSConn(rawConn)) sesh.AddConnection(common.NewTLSConn(rawConn))
writingEnd := common.NewTLSConn(rawWritingEnd) writingEnd := common.NewTLSConn(rawWritingEnd)
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
buf := make([]byte, 10)
obfsBuf := make([]byte, 512)
t.Run("Plain read", func(t *testing.T) { t.Run("Plain read", func(t *testing.T) {
f.StreamID = streamID f.StreamID = streamID
i, _ := sesh.obfuscate(f, obfsBuf, 0) i, _ := sesh.Obfs(f, obfsBuf, 0)
streamID++ streamID++
writingEnd.Write(obfsBuf[:i]) writingEnd.Write(obfsBuf[:i])
stream, err := sesh.Accept() stream, err := sesh.Accept()
@ -300,7 +343,7 @@ func TestStream_Read(t *testing.T) {
}) })
t.Run("Nil buf", func(t *testing.T) { t.Run("Nil buf", func(t *testing.T) {
f.StreamID = streamID f.StreamID = streamID
i, _ := sesh.obfuscate(f, obfsBuf, 0) i, _ := sesh.Obfs(f, obfsBuf, 0)
streamID++ streamID++
writingEnd.Write(obfsBuf[:i]) writingEnd.Write(obfsBuf[:i])
stream, _ := sesh.Accept() stream, _ := sesh.Accept()
@ -312,22 +355,21 @@ func TestStream_Read(t *testing.T) {
}) })
t.Run("Read after stream close", func(t *testing.T) { t.Run("Read after stream close", func(t *testing.T) {
f.StreamID = streamID f.StreamID = streamID
i, _ := sesh.obfuscate(f, obfsBuf, 0) i, _ := sesh.Obfs(f, obfsBuf, 0)
streamID++ streamID++
writingEnd.Write(obfsBuf[:i]) writingEnd.Write(obfsBuf[:i])
stream, _ := sesh.Accept() stream, _ := sesh.Accept()
time.Sleep(500 * time.Millisecond)
stream.Close() stream.Close()
i, err := stream.Read(buf)
_, err := io.ReadFull(stream, buf[:smallPayloadLen])
if err != nil { if err != nil {
t.Errorf("cannot read residual data: %v", err) t.Error("failed to read", err)
} }
if !bytes.Equal(buf[:smallPayloadLen], testPayload) { if i != smallPayloadLen {
t.Errorf("expected read %v, got %v", smallPayloadLen, i)
}
if !bytes.Equal(buf[:i], testPayload) {
t.Error("expected", testPayload, t.Error("expected", testPayload,
"got", buf[:smallPayloadLen]) "got", buf[:i])
} }
_, err = stream.Read(buf) _, err = stream.Read(buf)
if err == nil { if err == nil {
@ -337,21 +379,21 @@ func TestStream_Read(t *testing.T) {
}) })
t.Run("Read after session close", func(t *testing.T) { t.Run("Read after session close", func(t *testing.T) {
f.StreamID = streamID f.StreamID = streamID
i, _ := sesh.obfuscate(f, obfsBuf, 0) i, _ := sesh.Obfs(f, obfsBuf, 0)
streamID++ streamID++
writingEnd.Write(obfsBuf[:i]) writingEnd.Write(obfsBuf[:i])
stream, _ := sesh.Accept() stream, _ := sesh.Accept()
time.Sleep(500 * time.Millisecond)
sesh.Close() sesh.Close()
_, err := io.ReadFull(stream, buf[:smallPayloadLen]) i, err := stream.Read(buf)
if err != nil { if err != nil {
t.Errorf("cannot read resiual data: %v", err) t.Error("failed to read", err)
} }
if !bytes.Equal(buf[:smallPayloadLen], testPayload) { if i != smallPayloadLen {
t.Errorf("expected read %v, got %v", smallPayloadLen, i)
}
if !bytes.Equal(buf[:i], testPayload) {
t.Error("expected", testPayload, t.Error("expected", testPayload,
"got", buf[:smallPayloadLen]) "got", buf[:i])
} }
_, err = stream.Read(buf) _, err = stream.Read(buf)
if err == nil { if err == nil {
@ -363,10 +405,35 @@ func TestStream_Read(t *testing.T) {
} }
} }
func TestStream_SetWriteToTimeout(t *testing.T) {
seshes := map[string]*Session{
"ordered": setupSesh(false, emptyKey, E_METHOD_PLAIN),
"unordered": setupSesh(true, emptyKey, E_METHOD_PLAIN),
}
for name, sesh := range seshes {
t.Run(name, func(t *testing.T) {
stream, _ := sesh.OpenStream()
stream.SetWriteToTimeout(100 * time.Millisecond)
done := make(chan struct{})
go func() {
stream.WriteTo(ioutil.Discard)
done <- struct{}{}
}()
select {
case <-done:
return
case <-time.After(500 * time.Millisecond):
t.Error("didn't timeout")
}
})
}
}
func TestStream_SetReadFromTimeout(t *testing.T) { func TestStream_SetReadFromTimeout(t *testing.T) {
seshes := map[string]*Session{ seshes := map[string]*Session{
"ordered": setupSesh(false, emptyKey, EncryptionMethodPlain), "ordered": setupSesh(false, emptyKey, E_METHOD_PLAIN),
"unordered": setupSesh(true, emptyKey, EncryptionMethodPlain), "unordered": setupSesh(true, emptyKey, E_METHOD_PLAIN),
} }
for name, sesh := range seshes { for name, sesh := range seshes {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {

View File

@ -2,23 +2,19 @@ package multiplex
import ( import (
"errors" "errors"
"github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"math/rand/v2" "math/rand"
"net" "net"
"sync" "sync"
"sync/atomic" "sync/atomic"
) )
type switchboardStrategy int
const ( const (
fixedConnMapping switchboardStrategy = iota FIXED_CONN_MAPPING switchboardStrategy = iota
uniformSpread UNIFORM_SPREAD
) )
// switchboard represents the connection pool. It is responsible for managing // switchboard is responsible for managing TCP connections between client and server.
// transport-layer connections between client and server.
// It has several purposes: constantly receiving incoming data from all connections // It has several purposes: constantly receiving incoming data from all connections
// and pass them to Session.recvDataFromRemote(); accepting data through // and pass them to Session.recvDataFromRemote(); accepting data through
// switchboard.send(), in which it selects a connection according to its // switchboard.send(), in which it selects a connection according to its
@ -30,131 +26,148 @@ type switchboard struct {
valve Valve valve Valve
strategy switchboardStrategy strategy switchboardStrategy
// map of connId to net.Conn
conns sync.Map conns sync.Map
connsCount uint32 numConns uint32
randPool sync.Pool nextConnId uint32
broken uint32 broken uint32
} }
func makeSwitchboard(sesh *Session) *switchboard { func makeSwitchboard(sesh *Session) *switchboard {
var strategy switchboardStrategy
if sesh.Unordered {
log.Debug("Connection is unordered")
strategy = UNIFORM_SPREAD
} else {
strategy = FIXED_CONN_MAPPING
}
sb := &switchboard{ sb := &switchboard{
session: sesh, session: sesh,
strategy: uniformSpread, strategy: strategy,
valve: sesh.Valve, valve: sesh.Valve,
randPool: sync.Pool{New: func() interface{} { nextConnId: 1,
var state [32]byte
common.CryptoRandRead(state[:])
return rand.New(rand.NewChaCha8(state))
}},
} }
return sb return sb
} }
var errBrokenSwitchboard = errors.New("the switchboard is broken") var errBrokenSwitchboard = errors.New("the switchboard is broken")
func (sb *switchboard) addConn(conn net.Conn) { func (sb *switchboard) connsCount() int {
connId := atomic.AddUint32(&sb.connsCount, 1) - 1 return int(atomic.LoadUint32(&sb.numConns))
sb.conns.Store(connId, conn)
go sb.deplex(conn)
} }
// a pointer to assignedConn is passed here so that the switchboard can reassign it if that conn isn't usable func (sb *switchboard) addConn(conn net.Conn) {
func (sb *switchboard) send(data []byte, assignedConn *net.Conn) (n int, err error) { connId := atomic.AddUint32(&sb.nextConnId, 1) - 1
atomic.AddUint32(&sb.numConns, 1)
sb.conns.Store(connId, conn)
go sb.deplex(connId, conn)
}
// a pointer to connId is passed here so that the switchboard can reassign it if that connId isn't usable
func (sb *switchboard) send(data []byte, connId *uint32) (n int, err error) {
writeAndRegUsage := func(conn net.Conn, d []byte) (int, error) {
n, err = conn.Write(d)
if err != nil {
sb.conns.Delete(*connId)
sb.close("failed to write to remote " + err.Error())
return n, err
}
sb.valve.AddTx(int64(n))
return n, nil
}
sb.valve.txWait(len(data)) sb.valve.txWait(len(data))
if atomic.LoadUint32(&sb.broken) == 1 { if atomic.LoadUint32(&sb.broken) == 1 || sb.connsCount() == 0 {
return 0, errBrokenSwitchboard return 0, errBrokenSwitchboard
} }
var conn net.Conn
switch sb.strategy { switch sb.strategy {
case uniformSpread: case UNIFORM_SPREAD:
conn, err = sb.pickRandConn() _, conn, err := sb.pickRandConn()
if err != nil { if err != nil {
return 0, errBrokenSwitchboard return 0, errBrokenSwitchboard
} }
n, err = conn.Write(data) return writeAndRegUsage(conn, data)
if err != nil { case FIXED_CONN_MAPPING:
sb.session.SetTerminalMsg("failed to send to remote " + err.Error()) connI, ok := sb.conns.Load(*connId)
sb.session.passiveClose() if ok {
return n, err conn := connI.(net.Conn)
} return writeAndRegUsage(conn, data)
case fixedConnMapping: } else {
// FIXME: this strategy has a tendency to cause a TLS conn socket buffer to fill up, newConnId, conn, err := sb.pickRandConn()
// which is a problem when multiple streams are mapped to the same conn, resulting
// in all such streams being blocked.
conn = *assignedConn
if conn == nil {
conn, err = sb.pickRandConn()
if err != nil { if err != nil {
sb.session.SetTerminalMsg("failed to pick a connection " + err.Error()) return 0, errBrokenSwitchboard
sb.session.passiveClose()
return 0, err
} }
*assignedConn = conn *connId = newConnId
} return writeAndRegUsage(conn, data)
n, err = conn.Write(data)
if err != nil {
sb.session.SetTerminalMsg("failed to send to remote " + err.Error())
sb.session.passiveClose()
return n, err
} }
default: default:
return 0, errors.New("unsupported traffic distribution strategy") return 0, errors.New("unsupported traffic distribution strategy")
} }
sb.valve.AddTx(int64(n))
return n, nil
} }
// returns a random conn. This function can be called concurrently. // returns a random connId
func (sb *switchboard) pickRandConn() (net.Conn, error) { func (sb *switchboard) pickRandConn() (uint32, net.Conn, error) {
if atomic.LoadUint32(&sb.broken) == 1 { connCount := sb.connsCount()
return nil, errBrokenSwitchboard if atomic.LoadUint32(&sb.broken) == 1 || connCount == 0 {
return 0, nil, errBrokenSwitchboard
} }
connsCount := atomic.LoadUint32(&sb.connsCount) // there is no guarantee that sb.conns still has the same amount of entries
if connsCount == 0 { // between the count loop and the pick loop
return nil, errBrokenSwitchboard // so if the r > len(sb.conns) at the point of range call, the last visited element is picked
var id uint32
var conn net.Conn
r := rand.Intn(connCount)
var c int
sb.conns.Range(func(connIdI, connI interface{}) bool {
if r == c {
id = connIdI.(uint32)
conn = connI.(net.Conn)
return false
}
c++
return true
})
// if len(sb.conns) is 0
if conn == nil {
return 0, nil, errBrokenSwitchboard
} }
return id, conn, nil
}
randReader := sb.randPool.Get().(*rand.Rand) func (sb *switchboard) close(terminalMsg string) {
connId := randReader.Uint32N(connsCount) atomic.StoreUint32(&sb.broken, 1)
sb.randPool.Put(randReader) if !sb.session.IsClosed() {
sb.session.SetTerminalMsg(terminalMsg)
ret, ok := sb.conns.Load(connId) sb.session.passiveClose()
if !ok {
log.Errorf("failed to get conn %d", connId)
return nil, errBrokenSwitchboard
} }
return ret.(net.Conn), nil
} }
// actively triggered by session.Close() // actively triggered by session.Close()
func (sb *switchboard) closeAll() { func (sb *switchboard) closeAll() {
if !atomic.CompareAndSwapUint32(&sb.broken, 0, 1) { sb.conns.Range(func(key, connI interface{}) bool {
return conn := connI.(net.Conn)
} conn.Close()
atomic.StoreUint32(&sb.connsCount, 0) sb.conns.Delete(key)
sb.conns.Range(func(_, conn interface{}) bool {
conn.(net.Conn).Close()
sb.conns.Delete(conn)
return true return true
}) })
} }
// deplex function costantly reads from a TCP connection // deplex function costantly reads from a TCP connection
func (sb *switchboard) deplex(conn net.Conn) { func (sb *switchboard) deplex(connId uint32, conn net.Conn) {
defer conn.Close() defer conn.Close()
buf := make([]byte, sb.session.connReceiveBufferSize) buf := make([]byte, sb.session.ConnReceiveBufferSize)
for { for {
n, err := conn.Read(buf) n, err := conn.Read(buf)
sb.valve.rxWait(n) sb.valve.rxWait(n)
sb.valve.AddRx(int64(n)) sb.valve.AddRx(int64(n))
if err != nil { if err != nil {
log.Debugf("a connection for session %v has closed: %v", sb.session.id, err) log.Debugf("a connection for session %v has closed: %v", sb.session.id, err)
sb.session.SetTerminalMsg("a connection has dropped unexpectedly") sb.conns.Delete(connId)
sb.session.passiveClose() atomic.AddUint32(&sb.numConns, ^uint32(0))
sb.close("a connection has dropped unexpectedly")
return return
} }

View File

@ -1,14 +1,11 @@
package multiplex package multiplex
import ( import (
"github.com/cbeuw/connutil"
"math/rand" "math/rand"
"sync" "sync"
"sync/atomic"
"testing" "testing"
"time" "time"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
) )
func TestSwitchboard_Send(t *testing.T) { func TestSwitchboard_Send(t *testing.T) {
@ -16,14 +13,14 @@ func TestSwitchboard_Send(t *testing.T) {
sesh := MakeSession(0, seshConfig) sesh := MakeSession(0, seshConfig)
hole0 := connutil.Discard() hole0 := connutil.Discard()
sesh.sb.addConn(hole0) sesh.sb.addConn(hole0)
conn, err := sesh.sb.pickRandConn() connId, _, err := sesh.sb.pickRandConn()
if err != nil { if err != nil {
t.Error("failed to get a random conn", err) t.Error("failed to get a random conn", err)
return return
} }
data := make([]byte, 1000) data := make([]byte, 1000)
rand.Read(data) rand.Read(data)
_, err = sesh.sb.send(data, &conn) _, err = sesh.sb.send(data, &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -31,23 +28,23 @@ func TestSwitchboard_Send(t *testing.T) {
hole1 := connutil.Discard() hole1 := connutil.Discard()
sesh.sb.addConn(hole1) sesh.sb.addConn(hole1)
conn, err = sesh.sb.pickRandConn() connId, _, err = sesh.sb.pickRandConn()
if err != nil { if err != nil {
t.Error("failed to get a random conn", err) t.Error("failed to get a random conn", err)
return return
} }
_, err = sesh.sb.send(data, &conn) _, err = sesh.sb.send(data, &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
conn, err = sesh.sb.pickRandConn() connId, _, err = sesh.sb.pickRandConn()
if err != nil { if err != nil {
t.Error("failed to get a random conn", err) t.Error("failed to get a random conn", err)
return return
} }
_, err = sesh.sb.send(data, &conn) _, err = sesh.sb.send(data, &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -73,7 +70,7 @@ func BenchmarkSwitchboard_Send(b *testing.B) {
seshConfig := SessionConfig{} seshConfig := SessionConfig{}
sesh := MakeSession(0, seshConfig) sesh := MakeSession(0, seshConfig)
sesh.sb.addConn(hole) sesh.sb.addConn(hole)
conn, err := sesh.sb.pickRandConn() connId, _, err := sesh.sb.pickRandConn()
if err != nil { if err != nil {
b.Error("failed to get a random conn", err) b.Error("failed to get a random conn", err)
return return
@ -83,7 +80,7 @@ func BenchmarkSwitchboard_Send(b *testing.B) {
b.SetBytes(int64(len(data))) b.SetBytes(int64(len(data)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
sesh.sb.send(data, &conn) sesh.sb.send(data, &connId)
} }
} }
@ -94,7 +91,7 @@ func TestSwitchboard_TxCredit(t *testing.T) {
sesh := MakeSession(0, seshConfig) sesh := MakeSession(0, seshConfig)
hole := connutil.Discard() hole := connutil.Discard()
sesh.sb.addConn(hole) sesh.sb.addConn(hole)
conn, err := sesh.sb.pickRandConn() connId, _, err := sesh.sb.pickRandConn()
if err != nil { if err != nil {
t.Error("failed to get a random conn", err) t.Error("failed to get a random conn", err)
return return
@ -102,10 +99,10 @@ func TestSwitchboard_TxCredit(t *testing.T) {
data := make([]byte, 1000) data := make([]byte, 1000)
rand.Read(data) rand.Read(data)
t.Run("fixed conn mapping", func(t *testing.T) { t.Run("FIXED CONN MAPPING", func(t *testing.T) {
*sesh.sb.valve.(*LimitedValve).tx = 0 *sesh.sb.valve.(*LimitedValve).tx = 0
sesh.sb.strategy = fixedConnMapping sesh.sb.strategy = FIXED_CONN_MAPPING
n, err := sesh.sb.send(data[:10], &conn) n, err := sesh.sb.send(data[:10], &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -118,10 +115,10 @@ func TestSwitchboard_TxCredit(t *testing.T) {
t.Error("tx credit didn't increase by 10") t.Error("tx credit didn't increase by 10")
} }
}) })
t.Run("uniform spread", func(t *testing.T) { t.Run("UNIFORM", func(t *testing.T) {
*sesh.sb.valve.(*LimitedValve).tx = 0 *sesh.sb.valve.(*LimitedValve).tx = 0
sesh.sb.strategy = uniformSpread sesh.sb.strategy = UNIFORM_SPREAD
n, err := sesh.sb.send(data[:10], &conn) n, err := sesh.sb.send(data[:10], &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -139,7 +136,7 @@ func TestSwitchboard_TxCredit(t *testing.T) {
func TestSwitchboard_CloseOnOneDisconn(t *testing.T) { func TestSwitchboard_CloseOnOneDisconn(t *testing.T) {
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
sesh := setupSesh(false, sessionKey, EncryptionMethodPlain) sesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
conn0client, conn0server := connutil.AsyncPipe() conn0client, conn0server := connutil.AsyncPipe()
sesh.AddConnection(conn0client) sesh.AddConnection(conn0client)
@ -148,11 +145,11 @@ func TestSwitchboard_CloseOnOneDisconn(t *testing.T) {
sesh.AddConnection(conn1client) sesh.AddConnection(conn1client)
conn0server.Close() conn0server.Close()
time.Sleep(500 * time.Millisecond)
assert.Eventually(t, func() bool { if !sesh.IsClosed() {
return sesh.IsClosed() t.Error("session not closed after one conn is disconnected")
}, time.Second, 10*time.Millisecond, "session not closed after one conn is disconnected") return
}
if _, err := conn1client.Write([]byte{0x00}); err == nil { if _, err := conn1client.Write([]byte{0x00}); err == nil {
t.Error("the other conn is still connected") t.Error("the other conn is still connected")
return return
@ -175,13 +172,15 @@ func TestSwitchboard_ConnsCount(t *testing.T) {
} }
wg.Wait() wg.Wait()
if atomic.LoadUint32(&sesh.sb.connsCount) != 1000 { if sesh.sb.connsCount() != 1000 {
t.Error("connsCount incorrect") t.Error("connsCount incorrect")
} }
sesh.sb.closeAll() sesh.sb.closeAll()
assert.Eventuallyf(t, func() bool { time.Sleep(500 * time.Millisecond)
return atomic.LoadUint32(&sesh.sb.connsCount) == 0 if sesh.sb.connsCount() != 0 {
}, time.Second, 10*time.Millisecond, "connsCount incorrect: %v", atomic.LoadUint32(&sesh.sb.connsCount)) t.Error("connsCount incorrect")
}
} }

View File

@ -4,11 +4,11 @@ import (
"crypto" "crypto"
"errors" "errors"
"fmt" "fmt"
"io"
"net"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
"io"
"math/rand"
"net"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -45,7 +45,8 @@ func (TLS) makeResponder(clientHelloSessionId []byte, sharedSecret [32]byte) Res
// the cert length needs to be the same for all handshakes belonging to the same session // the cert length needs to be the same for all handshakes belonging to the same session
// we can use sessionKey as a seed here to ensure consistency // we can use sessionKey as a seed here to ensure consistency
possibleCertLengths := []int{42, 27, 68, 59, 36, 44, 46} possibleCertLengths := []int{42, 27, 68, 59, 36, 44, 46}
cert := make([]byte, possibleCertLengths[common.RandInt(len(possibleCertLengths))]) rand.Seed(int64(sessionKey[0]))
cert := make([]byte, possibleCertLengths[rand.Intn(len(possibleCertLengths))])
common.RandRead(randSource, cert) common.RandRead(randSource, cert)
var nonce [12]byte var nonce [12]byte
@ -78,13 +79,7 @@ func (TLS) unmarshalClientHello(ch *ClientHello, staticPv crypto.PrivateKey) (fr
return return
} }
var sharedSecret []byte copy(fragments.sharedSecret[:], ecdh.GenerateSharedSecret(staticPv, ephPub))
sharedSecret, err = ecdh.GenerateSharedSecret(staticPv, ephPub)
if err != nil {
return
}
copy(fragments.sharedSecret[:], sharedSecret)
var keyShare []byte var keyShare []byte
keyShare, err = parseKeyShare(ch.extensions[[2]byte{0x00, 0x33}]) keyShare, err = parseKeyShare(ch.extensions[[2]byte{0x00, 0x33}])
if err != nil { if err != nil {

View File

@ -5,7 +5,6 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
) )
@ -164,12 +163,12 @@ func parseClientHello(data []byte) (ret *ClientHello, err error) {
func composeServerHello(sessionId []byte, nonce [12]byte, encryptedSessionKeyWithTag [48]byte) []byte { func composeServerHello(sessionId []byte, nonce [12]byte, encryptedSessionKeyWithTag [48]byte) []byte {
var serverHello [11][]byte var serverHello [11][]byte
serverHello[0] = []byte{0x02} // handshake type serverHello[0] = []byte{0x02} // handshake type
serverHello[1] = []byte{0x00, 0x00, 0x76} // length 118 serverHello[1] = []byte{0x00, 0x00, 0x76} // length 77
serverHello[2] = []byte{0x03, 0x03} // server version serverHello[2] = []byte{0x03, 0x03} // server version
serverHello[3] = append(nonce[0:12], encryptedSessionKeyWithTag[0:20]...) // random 32 bytes serverHello[3] = append(nonce[0:12], encryptedSessionKeyWithTag[0:20]...) // random 32 bytes
serverHello[4] = []byte{0x20} // session id length 32 serverHello[4] = []byte{0x20} // session id length 32
serverHello[5] = sessionId // session id serverHello[5] = sessionId // session id
serverHello[6] = []byte{0x13, 0x02} // cipher suite TLS_AES_256_GCM_SHA384 serverHello[6] = []byte{0xc0, 0x30} // cipher suite TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
serverHello[7] = []byte{0x00} // compression method null serverHello[7] = []byte{0x00} // compression method null
serverHello[8] = []byte{0x00, 0x2e} // extensions length 46 serverHello[8] = []byte{0x00, 0x2e} // extensions length 46

View File

@ -1,9 +1,8 @@
package server package server
import ( import (
"sync"
"github.com/cbeuw/Cloak/internal/server/usermanager" "github.com/cbeuw/Cloak/internal/server/usermanager"
"sync"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
) )

View File

@ -3,13 +3,12 @@ package server
import ( import (
"crypto/rand" "crypto/rand"
"encoding/base64" "encoding/base64"
"io/ioutil"
"os"
"testing"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
"github.com/cbeuw/Cloak/internal/server/usermanager" "github.com/cbeuw/Cloak/internal/server/usermanager"
"io/ioutil"
"os"
"testing"
) )
func getSeshConfig(unordered bool) mux.SessionConfig { func getSeshConfig(unordered bool) mux.SessionConfig {

View File

@ -5,9 +5,8 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"time"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -51,7 +50,7 @@ func decryptClientInfo(fragments authFragments, serverTime time.Time) (info Clie
timestamp := int64(binary.BigEndian.Uint64(plaintext[29:37])) timestamp := int64(binary.BigEndian.Uint64(plaintext[29:37]))
clientTime := time.Unix(timestamp, 0) clientTime := time.Unix(timestamp, 0)
if !(clientTime.After(serverTime.Add(-timestampTolerance)) && clientTime.Before(serverTime.Add(timestampTolerance))) { if !(clientTime.After(serverTime.Truncate(TIMESTAMP_TOLERANCE)) && clientTime.Before(serverTime.Add(TIMESTAMP_TOLERANCE))) {
err = fmt.Errorf("%v: received timestamp %v", ErrTimestampOutOfWindow, timestamp) err = fmt.Errorf("%v: received timestamp %v", ErrTimestampOutOfWindow, timestamp)
return return
} }
@ -61,7 +60,7 @@ func decryptClientInfo(fragments authFragments, serverTime time.Time) (info Clie
var ErrReplay = errors.New("duplicate random") var ErrReplay = errors.New("duplicate random")
var ErrBadProxyMethod = errors.New("invalid proxy method") var ErrBadProxyMethod = errors.New("invalid proxy method")
var ErrBadDecryption = errors.New("decryption/authentication failure") var ErrBadDecryption = errors.New("decryption/authentication faliure")
// AuthFirstPacket checks if the first packet of data is ClientHello or HTTP GET, and checks if it was from a Cloak client // AuthFirstPacket checks if the first packet of data is ClientHello or HTTP GET, and checks if it was from a Cloak client
// if it is from a Cloak client, it returns the ClientInfo with the decrypted fields. It doesn't check if the user // if it is from a Cloak client, it returns the ClientInfo with the decrypted fields. It doesn't check if the user
@ -78,12 +77,16 @@ func AuthFirstPacket(firstPacket []byte, transport Transport, sta *State) (info
return return
} }
info, err = decryptClientInfo(fragments, sta.WorldState.Now().UTC()) info, err = decryptClientInfo(fragments, sta.WorldState.Now())
if err != nil { if err != nil {
log.Debug(err) log.Debug(err)
err = fmt.Errorf("%w: %v", ErrBadDecryption, err) err = fmt.Errorf("%w: %v", ErrBadDecryption, err)
return return
} }
if _, ok := sta.ProxyBook[info.ProxyMethod]; !ok {
err = ErrBadProxyMethod
return
}
info.Transport = transport info.Transport = transport
return return
} }

View File

@ -4,14 +4,13 @@ import (
"crypto" "crypto"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"testing"
"time"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
"testing"
"time"
) )
func TestDecryptClientInfo(t *testing.T) { func TestTouchStone(t *testing.T) {
pvBytes, _ := hex.DecodeString("10de5a3c4a4d04efafc3e06d1506363a72bd6d053baef123e6a9a79a0c04b547") pvBytes, _ := hex.DecodeString("10de5a3c4a4d04efafc3e06d1506363a72bd6d053baef123e6a9a79a0c04b547")
p, _ := ecdh.Unmarshal(pvBytes) p, _ := ecdh.Unmarshal(pvBytes)
staticPv := p.(crypto.PrivateKey) staticPv := p.(crypto.PrivateKey)
@ -50,7 +49,7 @@ func TestDecryptClientInfo(t *testing.T) {
t.Errorf("expecting no error, got %v", err) t.Errorf("expecting no error, got %v", err)
return return
} }
nineSixSixM50 := time.Unix(1565998966, 0).Add(-50) nineSixSixM50 := time.Unix(1565998966, 0).Truncate(50)
_, err = decryptClientInfo(ai, nineSixSixM50) _, err = decryptClientInfo(ai, nineSixSixM50)
if err != nil { if err != nil {
t.Errorf("expecting no error, got %v", err) t.Errorf("expecting no error, got %v", err)
@ -67,7 +66,7 @@ func TestDecryptClientInfo(t *testing.T) {
return return
} }
nineSixSixOver := time.Unix(1565998966, 0).Add(timestampTolerance + 10) nineSixSixOver := time.Unix(1565998966, 0).Add(TIMESTAMP_TOLERANCE + 10)
_, err = decryptClientInfo(ai, nineSixSixOver) _, err = decryptClientInfo(ai, nineSixSixOver)
if err == nil { if err == nil {
t.Errorf("expecting %v, got %v", ErrTimestampOutOfWindow, err) t.Errorf("expecting %v, got %v", ErrTimestampOutOfWindow, err)
@ -83,7 +82,7 @@ func TestDecryptClientInfo(t *testing.T) {
return return
} }
nineSixSixUnder := time.Unix(1565998966, 0).Add(-(timestampTolerance + 10)) nineSixSixUnder := time.Unix(1565998966, 0).Add(TIMESTAMP_TOLERANCE - 10)
_, err = decryptClientInfo(ai, nineSixSixUnder) _, err = decryptClientInfo(ai, nineSixSixUnder)
if err == nil { if err == nil {
t.Errorf("expecting %v, got %v", ErrTimestampOutOfWindow, err) t.Errorf("expecting %v, got %v", ErrTimestampOutOfWindow, err)

View File

@ -6,22 +6,19 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
"io" "io"
"net" "net"
"net/http" "net/http"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
var b64 = base64.StdEncoding.EncodeToString var b64 = base64.StdEncoding.EncodeToString
const firstPacketSize = 3000
func Serve(l net.Listener, sta *State) { func Serve(l net.Listener, sta *State) {
waitDur := [10]time.Duration{ waitDur := [10]time.Duration{
50 * time.Millisecond, 100 * time.Millisecond, 300 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 50 * time.Millisecond, 100 * time.Millisecond, 300 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second,
@ -126,7 +123,7 @@ func readFirstPacket(conn net.Conn, buf []byte, timeout time.Duration) (int, Tra
func dispatchConnection(conn net.Conn, sta *State) { func dispatchConnection(conn net.Conn, sta *State) {
var err error var err error
buf := make([]byte, firstPacketSize) buf := make([]byte, 1500)
i, transport, redirOnErr, err := readFirstPacket(conn, buf, 15*time.Second) i, transport, redirOnErr, err := readFirstPacket(conn, buf, 15*time.Second)
data := buf[:i] data := buf[:i]
@ -178,13 +175,7 @@ func dispatchConnection(conn net.Conn, sta *State) {
common.RandRead(sta.WorldState.Rand, sessionKey[:]) common.RandRead(sta.WorldState.Rand, sessionKey[:])
obfuscator, err := mux.MakeObfuscator(ci.EncryptionMethod, sessionKey) obfuscator, err := mux.MakeObfuscator(ci.EncryptionMethod, sessionKey)
if err != nil { if err != nil {
log.WithFields(log.Fields{ log.Error(err)
"remoteAddr": conn.RemoteAddr(),
"UID": b64(ci.UID),
"sessionId": ci.SessionId,
"proxyMethod": ci.ProxyMethod,
"encryptionMethod": ci.EncryptionMethod,
}).Error(err)
goWeb() goWeb()
return return
} }
@ -199,7 +190,7 @@ func dispatchConnection(conn net.Conn, sta *State) {
// adminUID can use the server as normal with unlimited QoS credits. The adminUID is not // adminUID can use the server as normal with unlimited QoS credits. The adminUID is not
// added to the userinfo database. The distinction between going into the admin mode // added to the userinfo database. The distinction between going into the admin mode
// and normal proxy mode is that sessionID needs == 0 for admin mode // and normal proxy mode is that sessionID needs == 0 for admin mode
if len(sta.AdminUID) != 0 && bytes.Equal(ci.UID, sta.AdminUID) && ci.SessionId == 0 { if bytes.Equal(ci.UID, sta.AdminUID) && ci.SessionId == 0 {
sesh := mux.MakeSession(0, seshConfig) sesh := mux.MakeSession(0, seshConfig)
preparedConn, err := finishHandshake(conn, sessionKey, sta.WorldState.Rand) preparedConn, err := finishHandshake(conn, sessionKey, sta.WorldState.Rand)
if err != nil { if err != nil {
@ -216,18 +207,6 @@ func dispatchConnection(conn net.Conn, sta *State) {
return return
} }
if _, ok := sta.ProxyBook[ci.ProxyMethod]; !ok {
log.WithFields(log.Fields{
"remoteAddr": conn.RemoteAddr(),
"UID": b64(ci.UID),
"sessionId": ci.SessionId,
"proxyMethod": ci.ProxyMethod,
"encryptionMethod": ci.EncryptionMethod,
}).Error(ErrBadProxyMethod)
goWeb()
return
}
var user *ActiveUser var user *ActiveUser
if sta.IsBypass(ci.UID) { if sta.IsBypass(ci.UID) {
user, err = sta.Panel.GetBypassUser(ci.UID) user, err = sta.Panel.GetBypassUser(ci.UID)
@ -251,7 +230,7 @@ func dispatchConnection(conn net.Conn, sta *State) {
return return
} }
preparedConn, err := finishHandshake(conn, sesh.GetSessionKey(), sta.WorldState.Rand) preparedConn, err := finishHandshake(conn, sesh.SessionKey, sta.WorldState.Rand)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return
@ -296,6 +275,8 @@ func serveSession(sesh *mux.Session, ci ClientInfo, user *ActiveUser, sta *State
} }
log.Tracef("%v endpoint has been successfully connected", ci.ProxyMethod) log.Tracef("%v endpoint has been successfully connected", ci.ProxyMethod)
// if stream has nothing to send to proxy server for sta.Timeout period of time, stream will return error
newStream.(*mux.Stream).SetWriteToTimeout(sta.Timeout)
go func() { go func() {
if _, err := common.Copy(localConn, newStream); err != nil { if _, err := common.Copy(localConn, newStream); err != nil {
log.Tracef("copying stream to proxy server: %v", err) log.Tracef("copying stream to proxy server: %v", err)

View File

@ -2,13 +2,12 @@ package server
import ( import (
"encoding/hex" "encoding/hex"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
"io" "io"
"net" "net"
"testing" "testing"
"time" "time"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
) )
type rfpReturnValue struct { type rfpReturnValue struct {

View File

@ -1,15 +1,13 @@
//go:build gofuzz
// +build gofuzz // +build gofuzz
package server package server
import ( import (
"errors" "errors"
"net"
"time"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/connutil" "github.com/cbeuw/connutil"
"net"
"time"
) )
type rfpReturnValue_fuzz struct { type rfpReturnValue_fuzz struct {

View File

@ -5,26 +5,26 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
"io/ioutil" "io/ioutil"
"net" "net"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
) )
type RawConfig struct { type RawConfig struct {
ProxyBook map[string][]string ProxyBook map[string][]string
BindAddr []string BindAddr []string
BypassUID [][]byte BypassUID [][]byte
RedirAddr string RedirAddr string
PrivateKey []byte PrivateKey []byte
AdminUID []byte AdminUID []byte
DatabasePath string DatabasePath string
KeepAlive int StreamTimeout int
CncMode bool KeepAlive int
CncMode bool
} }
// State type stores the global state of the program // State type stores the global state of the program
@ -34,6 +34,8 @@ type State struct {
WorldState common.WorldState WorldState common.WorldState
AdminUID []byte AdminUID []byte
Timeout time.Duration
//KeepAlive time.Duration
BypassUID map[[16]byte]struct{} BypassUID map[[16]byte]struct{}
StaticPv crypto.PrivateKey StaticPv crypto.PrivateKey
@ -144,18 +146,19 @@ func InitState(preParse RawConfig, worldState common.WorldState) (sta *State, er
err = errors.New("command & control mode not implemented") err = errors.New("command & control mode not implemented")
return return
} else { } else {
var manager usermanager.UserManager manager, err := usermanager.MakeLocalManager(preParse.DatabasePath, worldState)
if len(preParse.AdminUID) == 0 || preParse.DatabasePath == "" { if err != nil {
manager = &usermanager.Voidmanager{} return sta, err
} else {
manager, err = usermanager.MakeLocalManager(preParse.DatabasePath, worldState)
if err != nil {
return sta, err
}
} }
sta.Panel = MakeUserPanel(manager) sta.Panel = MakeUserPanel(manager)
} }
if preParse.StreamTimeout == 0 {
sta.Timeout = time.Duration(300) * time.Second
} else {
sta.Timeout = time.Duration(preParse.StreamTimeout) * time.Second
}
if preParse.KeepAlive <= 0 { if preParse.KeepAlive <= 0 {
sta.ProxyDialer = &net.Dialer{KeepAlive: -1} sta.ProxyDialer = &net.Dialer{KeepAlive: -1}
} else { } else {
@ -174,10 +177,6 @@ func InitState(preParse RawConfig, worldState common.WorldState) (sta *State, er
return return
} }
if len(preParse.PrivateKey) == 0 {
err = fmt.Errorf("must have a valid private key. Run `ck-server -key` to generate one")
return
}
var pv [32]byte var pv [32]byte
copy(pv[:], preParse.PrivateKey) copy(pv[:], preParse.PrivateKey)
sta.StaticPv = &pv sta.StaticPv = &pv
@ -189,10 +188,8 @@ func InitState(preParse RawConfig, worldState common.WorldState) (sta *State, er
copy(arrUID[:], UID) copy(arrUID[:], UID)
sta.BypassUID[arrUID] = struct{}{} sta.BypassUID[arrUID] = struct{}{}
} }
if len(sta.AdminUID) != 0 { copy(arrUID[:], sta.AdminUID)
copy(arrUID[:], sta.AdminUID) sta.BypassUID[arrUID] = struct{}{}
sta.BypassUID[arrUID] = struct{}{}
}
go sta.UsedRandomCleaner() go sta.UsedRandomCleaner()
return sta, nil return sta, nil
@ -206,17 +203,17 @@ func (sta *State) IsBypass(UID []byte) bool {
return exist return exist
} }
const timestampTolerance = 180 * time.Second const TIMESTAMP_TOLERANCE = 180 * time.Second
const replayCacheAgeLimit = 12 * time.Hour const CACHE_CLEAN_INTERVAL = 12 * time.Hour
// UsedRandomCleaner clears the cache of used random fields every replayCacheAgeLimit // UsedRandomCleaner clears the cache of used random fields every CACHE_CLEAN_INTERVAL
func (sta *State) UsedRandomCleaner() { func (sta *State) UsedRandomCleaner() {
for { for {
time.Sleep(replayCacheAgeLimit) time.Sleep(CACHE_CLEAN_INTERVAL)
sta.usedRandomM.Lock() sta.usedRandomM.Lock()
for key, t := range sta.UsedRandom { for key, t := range sta.UsedRandom {
if time.Unix(t, 0).Before(sta.WorldState.Now().Add(timestampTolerance)) { if time.Unix(t, 0).Before(sta.WorldState.Now().Add(TIMESTAMP_TOLERANCE)) {
delete(sta.UsedRandom, key) delete(sta.UsedRandom, key)
} }
} }

View File

@ -43,22 +43,13 @@ func TestParseRedirAddr(t *testing.T) {
t.Errorf("parsing %v error: %v", domainNoPort, err) t.Errorf("parsing %v error: %v", domainNoPort, err)
return return
} }
expHost, err := net.ResolveIPAddr("ip", "example.com")
expIPs, err := net.LookupIP("example.com")
if err != nil { if err != nil {
t.Errorf("tester error: cannot resolve example.com: %v", err) t.Errorf("tester error: cannot resolve example.com: %v", err)
return return
} }
if host.String() != expHost.String() {
contain := false t.Errorf("expected %v got %v", expHost.String(), host.String())
for _, expIP := range expIPs {
if expIP.String() == host.String() {
contain = true
}
}
if !contain {
t.Errorf("expected one of %v got %v", expIPs, host.String())
} }
if port != "" { if port != "" {
t.Errorf("port not empty when there is no port") t.Errorf("port not empty when there is no port")
@ -72,22 +63,13 @@ func TestParseRedirAddr(t *testing.T) {
t.Errorf("parsing %v error: %v", domainWPort, err) t.Errorf("parsing %v error: %v", domainWPort, err)
return return
} }
expHost, err := net.ResolveIPAddr("ip", "example.com")
expIPs, err := net.LookupIP("example.com")
if err != nil { if err != nil {
t.Errorf("tester error: cannot resolve example.com: %v", err) t.Errorf("tester error: cannot resolve example.com: %v", err)
return return
} }
if host.String() != expHost.String() {
contain := false t.Errorf("expected %v got %v", expHost.String(), host.String())
for _, expIP := range expIPs {
if expIP.String() == host.String() {
contain = true
}
}
if !contain {
t.Errorf("expected one of %v got %v", expIPs, host.String())
} }
if port != "80" { if port != "80" {
t.Errorf("wrong port: expected %v, got %v", "80", port) t.Errorf("wrong port: expected %v, got %v", "80", port)

View File

@ -2,7 +2,7 @@ swagger: '2.0'
info: info:
description: | description: |
This is the API of Cloak server This is the API of Cloak server
version: 0.0.2 version: 1.0.0
title: Cloak Server title: Cloak Server
contact: contact:
email: cbeuw.andy@gmail.com email: cbeuw.andy@gmail.com
@ -12,6 +12,8 @@ info:
# host: petstore.swagger.io # host: petstore.swagger.io
# basePath: /v2 # basePath: /v2
tags: tags:
- name: admin
description: Endpoints used by the host administrators
- name: users - name: users
description: Operations related to user controls by admin description: Operations related to user controls by admin
# schemes: # schemes:
@ -20,6 +22,7 @@ paths:
/admin/users: /admin/users:
get: get:
tags: tags:
- admin
- users - users
summary: Show all users summary: Show all users
description: Returns an array of all UserInfo description: Returns an array of all UserInfo
@ -38,6 +41,7 @@ paths:
/admin/users/{UID}: /admin/users/{UID}:
get: get:
tags: tags:
- admin
- users - users
summary: Show userinfo by UID summary: Show userinfo by UID
description: Returns a UserInfo object description: Returns a UserInfo object
@ -64,6 +68,7 @@ paths:
description: internal error description: internal error
post: post:
tags: tags:
- admin
- users - users
summary: Updates the userinfo of the specified user, if the user does not exist, then a new user is created summary: Updates the userinfo of the specified user, if the user does not exist, then a new user is created
operationId: writeUserInfo operationId: writeUserInfo
@ -95,6 +100,7 @@ paths:
description: internal error description: internal error
delete: delete:
tags: tags:
- admin
- users - users
summary: Deletes a user summary: Deletes a user
operationId: deleteUser operationId: deleteUser

View File

@ -4,13 +4,12 @@ import (
"bytes" "bytes"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"github.com/stretchr/testify/assert"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"os" "os"
"testing" "testing"
"github.com/stretchr/testify/assert"
) )
var mockUIDb64 = base64.URLEncoding.EncodeToString(mockUID) var mockUIDb64 = base64.URLEncoding.EncodeToString(mockUID)
@ -47,36 +46,6 @@ func TestWriteUserInfoHlr(t *testing.T) {
assert.Equalf(t, http.StatusCreated, rr.Code, "response body: %v", rr.Body) assert.Equalf(t, http.StatusCreated, rr.Code, "response body: %v", rr.Body)
}) })
t.Run("partial update", func(t *testing.T) {
req, err := http.NewRequest("POST", "/admin/users/"+mockUIDb64, bytes.NewBuffer(marshalled))
assert.NoError(t, err)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
assert.Equal(t, http.StatusCreated, rr.Code)
partialUserInfo := UserInfo{
UID: mockUID,
SessionsCap: JustInt32(10),
}
partialMarshalled, _ := json.Marshal(partialUserInfo)
req, err = http.NewRequest("POST", "/admin/users/"+mockUIDb64, bytes.NewBuffer(partialMarshalled))
assert.NoError(t, err)
router.ServeHTTP(rr, req)
assert.Equal(t, http.StatusCreated, rr.Code)
req, err = http.NewRequest("GET", "/admin/users/"+mockUIDb64, nil)
assert.NoError(t, err)
router.ServeHTTP(rr, req)
assert.Equal(t, http.StatusCreated, rr.Code)
var got UserInfo
err = json.Unmarshal(rr.Body.Bytes(), &got)
assert.NoError(t, err)
expected := mockUserInfo
expected.SessionsCap = partialUserInfo.SessionsCap
assert.EqualValues(t, expected, got)
})
t.Run("empty parameter", func(t *testing.T) { t.Run("empty parameter", func(t *testing.T) {
req, err := http.NewRequest("POST", "/admin/users/", bytes.NewBuffer(marshalled)) req, err := http.NewRequest("POST", "/admin/users/", bytes.NewBuffer(marshalled))
if err != nil { if err != nil {

View File

@ -2,7 +2,6 @@ package usermanager
import ( import (
"encoding/binary" "encoding/binary"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
@ -128,7 +127,6 @@ func (manager *localManager) UploadStatus(uploads []StatusUpdate) ([]StatusRespo
"User no longer exists", "User no longer exists",
} }
responses = append(responses, resp) responses = append(responses, resp)
continue
} }
oldUp := int64(u64(bucket.Get([]byte("UpCredit")))) oldUp := int64(u64(bucket.Get([]byte("UpCredit"))))
@ -181,20 +179,17 @@ func (manager *localManager) ListAllUsers() (infos []UserInfo, err error) {
err = tx.ForEach(func(UID []byte, bucket *bolt.Bucket) error { err = tx.ForEach(func(UID []byte, bucket *bolt.Bucket) error {
var uinfo UserInfo var uinfo UserInfo
uinfo.UID = UID uinfo.UID = UID
uinfo.SessionsCap = JustInt32(int32(u32(bucket.Get([]byte("SessionsCap"))))) uinfo.SessionsCap = int32(u32(bucket.Get([]byte("SessionsCap"))))
uinfo.UpRate = JustInt64(int64(u64(bucket.Get([]byte("UpRate"))))) uinfo.UpRate = int64(u64(bucket.Get([]byte("UpRate"))))
uinfo.DownRate = JustInt64(int64(u64(bucket.Get([]byte("DownRate"))))) uinfo.DownRate = int64(u64(bucket.Get([]byte("DownRate"))))
uinfo.UpCredit = JustInt64(int64(u64(bucket.Get([]byte("UpCredit"))))) uinfo.UpCredit = int64(u64(bucket.Get([]byte("UpCredit"))))
uinfo.DownCredit = JustInt64(int64(u64(bucket.Get([]byte("DownCredit"))))) uinfo.DownCredit = int64(u64(bucket.Get([]byte("DownCredit"))))
uinfo.ExpiryTime = JustInt64(int64(u64(bucket.Get([]byte("ExpiryTime"))))) uinfo.ExpiryTime = int64(u64(bucket.Get([]byte("ExpiryTime"))))
infos = append(infos, uinfo) infos = append(infos, uinfo)
return nil return nil
}) })
return err return err
}) })
if infos == nil {
infos = []UserInfo{}
}
return return
} }
@ -205,52 +200,40 @@ func (manager *localManager) GetUserInfo(UID []byte) (uinfo UserInfo, err error)
return ErrUserNotFound return ErrUserNotFound
} }
uinfo.UID = UID uinfo.UID = UID
uinfo.SessionsCap = JustInt32(int32(u32(bucket.Get([]byte("SessionsCap"))))) uinfo.SessionsCap = int32(u32(bucket.Get([]byte("SessionsCap"))))
uinfo.UpRate = JustInt64(int64(u64(bucket.Get([]byte("UpRate"))))) uinfo.UpRate = int64(u64(bucket.Get([]byte("UpRate"))))
uinfo.DownRate = JustInt64(int64(u64(bucket.Get([]byte("DownRate"))))) uinfo.DownRate = int64(u64(bucket.Get([]byte("DownRate"))))
uinfo.UpCredit = JustInt64(int64(u64(bucket.Get([]byte("UpCredit"))))) uinfo.UpCredit = int64(u64(bucket.Get([]byte("UpCredit"))))
uinfo.DownCredit = JustInt64(int64(u64(bucket.Get([]byte("DownCredit"))))) uinfo.DownCredit = int64(u64(bucket.Get([]byte("DownCredit"))))
uinfo.ExpiryTime = JustInt64(int64(u64(bucket.Get([]byte("ExpiryTime"))))) uinfo.ExpiryTime = int64(u64(bucket.Get([]byte("ExpiryTime"))))
return nil return nil
}) })
return return
} }
func (manager *localManager) WriteUserInfo(u UserInfo) (err error) { func (manager *localManager) WriteUserInfo(uinfo UserInfo) (err error) {
err = manager.db.Update(func(tx *bolt.Tx) error { err = manager.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(u.UID) bucket, err := tx.CreateBucketIfNotExists(uinfo.UID)
if err != nil { if err != nil {
return err return err
} }
if u.SessionsCap != nil { if err = bucket.Put([]byte("SessionsCap"), i32ToB(int32(uinfo.SessionsCap))); err != nil {
if err = bucket.Put([]byte("SessionsCap"), i32ToB(*u.SessionsCap)); err != nil { return err
return err
}
} }
if u.UpRate != nil { if err = bucket.Put([]byte("UpRate"), i64ToB(uinfo.UpRate)); err != nil {
if err = bucket.Put([]byte("UpRate"), i64ToB(*u.UpRate)); err != nil { return err
return err
}
} }
if u.DownRate != nil { if err = bucket.Put([]byte("DownRate"), i64ToB(uinfo.DownRate)); err != nil {
if err = bucket.Put([]byte("DownRate"), i64ToB(*u.DownRate)); err != nil { return err
return err
}
} }
if u.UpCredit != nil { if err = bucket.Put([]byte("UpCredit"), i64ToB(uinfo.UpCredit)); err != nil {
if err = bucket.Put([]byte("UpCredit"), i64ToB(*u.UpCredit)); err != nil { return err
return err
}
} }
if u.DownCredit != nil { if err = bucket.Put([]byte("DownCredit"), i64ToB(uinfo.DownCredit)); err != nil {
if err = bucket.Put([]byte("DownCredit"), i64ToB(*u.DownCredit)); err != nil { return err
return err
}
} }
if u.ExpiryTime != nil { if err = bucket.Put([]byte("ExpiryTime"), i64ToB(uinfo.ExpiryTime)); err != nil {
if err = bucket.Put([]byte("ExpiryTime"), i64ToB(*u.ExpiryTime)); err != nil { return err
return err
}
} }
return nil return nil
}) })

View File

@ -2,6 +2,7 @@ package usermanager
import ( import (
"encoding/binary" "encoding/binary"
"github.com/cbeuw/Cloak/internal/common"
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
"os" "os"
@ -10,21 +11,18 @@ import (
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/stretchr/testify/assert"
) )
var mockUID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} var mockUID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
var mockWorldState = common.WorldOfTime(time.Unix(1, 0)) var mockWorldState = common.WorldOfTime(time.Unix(1, 0))
var mockUserInfo = UserInfo{ var mockUserInfo = UserInfo{
UID: mockUID, UID: mockUID,
SessionsCap: JustInt32(10), SessionsCap: 0,
UpRate: JustInt64(100), UpRate: 0,
DownRate: JustInt64(1000), DownRate: 0,
UpCredit: JustInt64(10000), UpCredit: 0,
DownCredit: JustInt64(100000), DownCredit: 0,
ExpiryTime: JustInt64(1000000), ExpiryTime: 100,
} }
func makeManager(t *testing.T) (mgr *localManager, cleaner func()) { func makeManager(t *testing.T) (mgr *localManager, cleaner func()) {
@ -45,23 +43,6 @@ func TestLocalManager_WriteUserInfo(t *testing.T) {
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
got, err := mgr.GetUserInfo(mockUID)
assert.NoError(t, err)
assert.EqualValues(t, mockUserInfo, got)
/* Partial update */
err = mgr.WriteUserInfo(UserInfo{
UID: mockUID,
SessionsCap: JustInt32(*mockUserInfo.SessionsCap + 1),
})
assert.NoError(t, err)
expected := mockUserInfo
expected.SessionsCap = JustInt32(*mockUserInfo.SessionsCap + 1)
got, err = mgr.GetUserInfo(mockUID)
assert.NoError(t, err)
assert.EqualValues(t, expected, got)
} }
func TestLocalManager_GetUserInfo(t *testing.T) { func TestLocalManager_GetUserInfo(t *testing.T) {
@ -82,7 +63,7 @@ func TestLocalManager_GetUserInfo(t *testing.T) {
t.Run("update a field", func(t *testing.T) { t.Run("update a field", func(t *testing.T) {
_ = mgr.WriteUserInfo(mockUserInfo) _ = mgr.WriteUserInfo(mockUserInfo)
updatedUserInfo := mockUserInfo updatedUserInfo := mockUserInfo
updatedUserInfo.SessionsCap = JustInt32(*mockUserInfo.SessionsCap + 1) updatedUserInfo.SessionsCap = mockUserInfo.SessionsCap + 1
err := mgr.WriteUserInfo(updatedUserInfo) err := mgr.WriteUserInfo(updatedUserInfo)
if err != nil { if err != nil {
@ -122,7 +103,15 @@ func TestLocalManager_DeleteUser(t *testing.T) {
} }
} }
var validUserInfo = mockUserInfo var validUserInfo = UserInfo{
UID: mockUID,
SessionsCap: 10,
UpRate: 100,
DownRate: 1000,
UpCredit: 10000,
DownCredit: 100000,
ExpiryTime: 1000000,
}
func TestLocalManager_AuthenticateUser(t *testing.T) { func TestLocalManager_AuthenticateUser(t *testing.T) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info") var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
@ -139,7 +128,7 @@ func TestLocalManager_AuthenticateUser(t *testing.T) {
t.Error(err) t.Error(err)
} }
if upRate != *validUserInfo.UpRate || downRate != *validUserInfo.DownRate { if upRate != validUserInfo.UpRate || downRate != validUserInfo.DownRate {
t.Error("wrong up or down rate") t.Error("wrong up or down rate")
} }
}) })
@ -153,7 +142,7 @@ func TestLocalManager_AuthenticateUser(t *testing.T) {
t.Run("expired user", func(t *testing.T) { t.Run("expired user", func(t *testing.T) {
expiredUserInfo := validUserInfo expiredUserInfo := validUserInfo
expiredUserInfo.ExpiryTime = JustInt64(mockWorldState.Now().Add(-10 * time.Second).Unix()) expiredUserInfo.ExpiryTime = mockWorldState.Now().Add(-10 * time.Second).Unix()
_ = mgr.WriteUserInfo(expiredUserInfo) _ = mgr.WriteUserInfo(expiredUserInfo)
@ -165,7 +154,7 @@ func TestLocalManager_AuthenticateUser(t *testing.T) {
t.Run("no credit", func(t *testing.T) { t.Run("no credit", func(t *testing.T) {
creditlessUserInfo := validUserInfo creditlessUserInfo := validUserInfo
creditlessUserInfo.UpCredit, creditlessUserInfo.DownCredit = JustInt64(-1), JustInt64(-1) creditlessUserInfo.UpCredit, creditlessUserInfo.DownCredit = -1, -1
_ = mgr.WriteUserInfo(creditlessUserInfo) _ = mgr.WriteUserInfo(creditlessUserInfo)
@ -197,7 +186,7 @@ func TestLocalManager_AuthoriseNewSession(t *testing.T) {
t.Run("expired user", func(t *testing.T) { t.Run("expired user", func(t *testing.T) {
expiredUserInfo := validUserInfo expiredUserInfo := validUserInfo
expiredUserInfo.ExpiryTime = JustInt64(mockWorldState.Now().Add(-10 * time.Second).Unix()) expiredUserInfo.ExpiryTime = mockWorldState.Now().Add(-10 * time.Second).Unix()
_ = mgr.WriteUserInfo(expiredUserInfo) _ = mgr.WriteUserInfo(expiredUserInfo)
err := mgr.AuthoriseNewSession(expiredUserInfo.UID, AuthorisationInfo{NumExistingSessions: 0}) err := mgr.AuthoriseNewSession(expiredUserInfo.UID, AuthorisationInfo{NumExistingSessions: 0})
@ -208,7 +197,7 @@ func TestLocalManager_AuthoriseNewSession(t *testing.T) {
t.Run("too many sessions", func(t *testing.T) { t.Run("too many sessions", func(t *testing.T) {
_ = mgr.WriteUserInfo(validUserInfo) _ = mgr.WriteUserInfo(validUserInfo)
err := mgr.AuthoriseNewSession(validUserInfo.UID, AuthorisationInfo{NumExistingSessions: int(*validUserInfo.SessionsCap + 1)}) err := mgr.AuthoriseNewSession(validUserInfo.UID, AuthorisationInfo{NumExistingSessions: int(validUserInfo.SessionsCap + 1)})
if err != ErrSessionsCapReached { if err != ErrSessionsCapReached {
t.Error("session cap not reached") t.Error("session cap not reached")
} }
@ -241,10 +230,10 @@ func TestLocalManager_UploadStatus(t *testing.T) {
t.Error(err) t.Error(err)
} }
if *updatedUserInfo.UpCredit != *validUserInfo.UpCredit-update.UpUsage { if updatedUserInfo.UpCredit != validUserInfo.UpCredit-update.UpUsage {
t.Error("up usage incorrect") t.Error("up usage incorrect")
} }
if *updatedUserInfo.DownCredit != *validUserInfo.DownCredit-update.DownUsage { if updatedUserInfo.DownCredit != validUserInfo.DownCredit-update.DownUsage {
t.Error("down usage incorrect") t.Error("down usage incorrect")
} }
}) })
@ -260,7 +249,7 @@ func TestLocalManager_UploadStatus(t *testing.T) {
UID: validUserInfo.UID, UID: validUserInfo.UID,
Active: true, Active: true,
NumSession: 1, NumSession: 1,
UpUsage: *validUserInfo.UpCredit + 100, UpUsage: validUserInfo.UpCredit + 100,
DownUsage: 0, DownUsage: 0,
Timestamp: mockWorldState.Now().Unix(), Timestamp: mockWorldState.Now().Unix(),
}, },
@ -272,19 +261,19 @@ func TestLocalManager_UploadStatus(t *testing.T) {
Active: true, Active: true,
NumSession: 1, NumSession: 1,
UpUsage: 0, UpUsage: 0,
DownUsage: *validUserInfo.DownCredit + 100, DownUsage: validUserInfo.DownCredit + 100,
Timestamp: mockWorldState.Now().Unix(), Timestamp: mockWorldState.Now().Unix(),
}, },
}, },
{"expired", {"expired",
UserInfo{ UserInfo{
UID: mockUID, UID: mockUID,
SessionsCap: JustInt32(10), SessionsCap: 10,
UpRate: JustInt64(0), UpRate: 0,
DownRate: JustInt64(0), DownRate: 0,
UpCredit: JustInt64(0), UpCredit: 0,
DownCredit: JustInt64(0), DownCredit: 0,
ExpiryTime: JustInt64(-1), ExpiryTime: -1,
}, },
StatusUpdate{ StatusUpdate{
UID: mockUserInfo.UID, UID: mockUserInfo.UID,
@ -329,12 +318,12 @@ func TestLocalManager_ListAllUsers(t *testing.T) {
rand.Read(randUID) rand.Read(randUID)
newUser := UserInfo{ newUser := UserInfo{
UID: randUID, UID: randUID,
SessionsCap: JustInt32(rand.Int31()), SessionsCap: rand.Int31(),
UpRate: JustInt64(rand.Int63()), UpRate: rand.Int63(),
DownRate: JustInt64(rand.Int63()), DownRate: rand.Int63(),
UpCredit: JustInt64(rand.Int63()), UpCredit: rand.Int63(),
DownCredit: JustInt64(rand.Int63()), DownCredit: rand.Int63(),
ExpiryTime: JustInt64(rand.Int63()), ExpiryTime: rand.Int63(),
} }
users = append(users, newUser) users = append(users, newUser)
wg.Add(1) wg.Add(1)

View File

@ -14,23 +14,16 @@ type StatusUpdate struct {
Timestamp int64 Timestamp int64
} }
type MaybeInt32 *int32
type MaybeInt64 *int64
type UserInfo struct { type UserInfo struct {
UID []byte UID []byte
SessionsCap MaybeInt32 SessionsCap int32
UpRate MaybeInt64 UpRate int64
DownRate MaybeInt64 DownRate int64
UpCredit MaybeInt64 UpCredit int64
DownCredit MaybeInt64 DownCredit int64
ExpiryTime MaybeInt64 ExpiryTime int64
} }
func JustInt32(v int32) MaybeInt32 { return &v }
func JustInt64(v int64) MaybeInt64 { return &v }
type StatusResponse struct { type StatusResponse struct {
UID []byte UID []byte
Action int Action int
@ -47,7 +40,6 @@ const (
var ErrUserNotFound = errors.New("UID does not correspond to a user") var ErrUserNotFound = errors.New("UID does not correspond to a user")
var ErrSessionsCapReached = errors.New("Sessions cap has reached") var ErrSessionsCapReached = errors.New("Sessions cap has reached")
var ErrMangerIsVoid = errors.New("cannot perform operation with user manager as database path is not specified")
var ErrNoUpCredit = errors.New("No upload credit left") var ErrNoUpCredit = errors.New("No upload credit left")
var ErrNoDownCredit = errors.New("No download credit left") var ErrNoDownCredit = errors.New("No download credit left")

View File

@ -1,31 +0,0 @@
package usermanager
type Voidmanager struct{}
func (v *Voidmanager) AuthenticateUser(bytes []byte) (int64, int64, error) {
return 0, 0, ErrMangerIsVoid
}
func (v *Voidmanager) AuthoriseNewSession(bytes []byte, info AuthorisationInfo) error {
return ErrMangerIsVoid
}
func (v *Voidmanager) UploadStatus(updates []StatusUpdate) ([]StatusResponse, error) {
return nil, ErrMangerIsVoid
}
func (v *Voidmanager) ListAllUsers() ([]UserInfo, error) {
return []UserInfo{}, ErrMangerIsVoid
}
func (v *Voidmanager) GetUserInfo(UID []byte) (UserInfo, error) {
return UserInfo{}, ErrMangerIsVoid
}
func (v *Voidmanager) WriteUserInfo(info UserInfo) error {
return ErrMangerIsVoid
}
func (v *Voidmanager) DeleteUser(UID []byte) error {
return ErrMangerIsVoid
}

View File

@ -1,44 +0,0 @@
package usermanager
import (
"testing"
"github.com/stretchr/testify/assert"
)
var v = &Voidmanager{}
func Test_Voidmanager_AuthenticateUser(t *testing.T) {
_, _, err := v.AuthenticateUser([]byte{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_AuthoriseNewSession(t *testing.T) {
err := v.AuthoriseNewSession([]byte{}, AuthorisationInfo{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_DeleteUser(t *testing.T) {
err := v.DeleteUser([]byte{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_GetUserInfo(t *testing.T) {
_, err := v.GetUserInfo([]byte{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_ListAllUsers(t *testing.T) {
_, err := v.ListAllUsers()
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_UploadStatus(t *testing.T) {
_, err := v.UploadStatus([]StatusUpdate{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_WriteUserInfo(t *testing.T) {
err := v.WriteUserInfo(UserInfo{})
assert.Equal(t, ErrMangerIsVoid, err)
}

View File

@ -2,12 +2,11 @@ package server
import ( import (
"encoding/base64" "encoding/base64"
"github.com/cbeuw/Cloak/internal/server/usermanager"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/cbeuw/Cloak/internal/server/usermanager"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -186,9 +185,6 @@ func (panel *userPanel) commitUpdate() error {
panel.usageUpdateQueue = make(map[[16]byte]*usagePair) panel.usageUpdateQueue = make(map[[16]byte]*usagePair)
panel.usageUpdateQueueM.Unlock() panel.usageUpdateQueueM.Unlock()
if len(statuses) == 0 {
return nil
}
responses, err := panel.Manager.UploadStatus(statuses) responses, err := panel.Manager.UploadStatus(statuses)
if err != nil { if err != nil {
return err return err

View File

@ -2,13 +2,12 @@ package server
import ( import (
"encoding/base64" "encoding/base64"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
"io/ioutil" "io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
) )
func TestUserPanel_BypassUser(t *testing.T) { func TestUserPanel_BypassUser(t *testing.T) {
@ -67,12 +66,12 @@ var mockUID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
var mockWorldState = common.WorldOfTime(time.Unix(1, 0)) var mockWorldState = common.WorldOfTime(time.Unix(1, 0))
var validUserInfo = usermanager.UserInfo{ var validUserInfo = usermanager.UserInfo{
UID: mockUID, UID: mockUID,
SessionsCap: usermanager.JustInt32(10), SessionsCap: 10,
UpRate: usermanager.JustInt64(100), UpRate: 100,
DownRate: usermanager.JustInt64(1000), DownRate: 1000,
UpCredit: usermanager.JustInt64(10000), UpCredit: 10000,
DownCredit: usermanager.JustInt64(100000), DownCredit: 100000,
ExpiryTime: usermanager.JustInt64(1000000), ExpiryTime: 1000000,
} }
func TestUserPanel_GetUser(t *testing.T) { func TestUserPanel_GetUser(t *testing.T) {
@ -139,10 +138,10 @@ func TestUserPanel_UpdateUsageQueue(t *testing.T) {
} }
updatedUinfo, _ := mgr.GetUserInfo(validUserInfo.UID) updatedUinfo, _ := mgr.GetUserInfo(validUserInfo.UID)
if *updatedUinfo.DownCredit != *validUserInfo.DownCredit-1 { if updatedUinfo.DownCredit != validUserInfo.DownCredit-1 {
t.Error("down credit incorrect update") t.Error("down credit incorrect update")
} }
if *updatedUinfo.UpCredit != *validUserInfo.UpCredit-2 { if updatedUinfo.UpCredit != validUserInfo.UpCredit-2 {
t.Error("up credit incorrect update") t.Error("up credit incorrect update")
} }
@ -156,10 +155,10 @@ func TestUserPanel_UpdateUsageQueue(t *testing.T) {
} }
updatedUinfo, _ = mgr.GetUserInfo(validUserInfo.UID) updatedUinfo, _ = mgr.GetUserInfo(validUserInfo.UID)
if *updatedUinfo.DownCredit != *validUserInfo.DownCredit-(1+3) { if updatedUinfo.DownCredit != validUserInfo.DownCredit-(1+3) {
t.Error("down credit incorrect update") t.Error("down credit incorrect update")
} }
if *updatedUinfo.UpCredit != *validUserInfo.UpCredit-(2+4) { if updatedUinfo.UpCredit != validUserInfo.UpCredit-(2+4) {
t.Error("up credit incorrect update") t.Error("up credit incorrect update")
} }
}) })
@ -171,7 +170,7 @@ func TestUserPanel_UpdateUsageQueue(t *testing.T) {
t.Error(err) t.Error(err)
} }
user.valve.AddTx(*validUserInfo.DownCredit + 100) user.valve.AddTx(validUserInfo.DownCredit + 100)
panel.updateUsageQueue() panel.updateUsageQueue()
err = panel.commitUpdate() err = panel.commitUpdate()
if err != nil { if err != nil {
@ -183,7 +182,7 @@ func TestUserPanel_UpdateUsageQueue(t *testing.T) {
} }
updatedUinfo, _ := mgr.GetUserInfo(validUserInfo.UID) updatedUinfo, _ := mgr.GetUserInfo(validUserInfo.UID)
if *updatedUinfo.DownCredit != -100 { if updatedUinfo.DownCredit != -100 {
t.Error("down credit not updated correctly after the user has been terminated") t.Error("down credit not updated correctly after the user has been terminated")
} }
}) })

View File

@ -7,12 +7,11 @@ import (
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh"
"io" "io"
"net" "net"
"net/http" "net/http"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh"
) )
type WebSocket struct{} type WebSocket struct{}
@ -85,13 +84,7 @@ func (WebSocket) unmarshalHidden(hidden []byte, staticPv crypto.PrivateKey) (fra
return return
} }
var sharedSecret []byte copy(fragments.sharedSecret[:], ecdh.GenerateSharedSecret(staticPv, ephPub))
sharedSecret, err = ecdh.GenerateSharedSecret(staticPv, ephPub)
if err != nil {
return
}
copy(fragments.sharedSecret[:], sharedSecret)
if len(hidden[32:]) != 64 { if len(hidden[32:]) != 64 {
err = fmt.Errorf("%v: %v", ErrCiphertextLength, len(hidden[32:])) err = fmt.Errorf("%v: %v", ErrCiphertextLength, len(hidden[32:]))

View File

@ -2,11 +2,10 @@ package server
import ( import (
"errors" "errors"
"net"
"net/http"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/gorilla/websocket" "github.com/gorilla/websocket"
"net"
"net/http"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )

View File

@ -2,9 +2,8 @@ package server
import ( import (
"bytes" "bytes"
"testing"
"github.com/cbeuw/connutil" "github.com/cbeuw/connutil"
"testing"
) )
func TestFirstBuffedConn_Read(t *testing.T) { func TestFirstBuffedConn_Read(t *testing.T) {

View File

@ -5,24 +5,26 @@ import (
"encoding/base64" "encoding/base64"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"io"
"math/rand"
"net"
"sync"
"testing"
"time"
"github.com/cbeuw/Cloak/internal/client" "github.com/cbeuw/Cloak/internal/client"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
"github.com/cbeuw/Cloak/internal/server" "github.com/cbeuw/Cloak/internal/server"
"github.com/cbeuw/connutil" "github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert" "io"
"io/ioutil"
"math/rand"
"net"
"os"
"sync"
"testing"
"time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
const numConns = 200 // -race option limits the number of goroutines to 8192 const numConns = 200 // -race option limits the number of goroutines to 8192
const delayBeforeTestingConnClose = 500 * time.Millisecond
const connCloseRetries = 3
func serveTCPEcho(l net.Listener) { func serveTCPEcho(l net.Listener) {
for { for {
@ -31,14 +33,15 @@ func serveTCPEcho(l net.Listener) {
log.Error(err) log.Error(err)
return return
} }
go func(conn net.Conn) { go func() {
conn := conn
_, err := io.Copy(conn, conn) _, err := io.Copy(conn, conn)
if err != nil { if err != nil {
conn.Close() conn.Close()
log.Error(err) log.Error(err)
return return
} }
}(conn) }()
} }
} }
@ -50,7 +53,8 @@ func serveUDPEcho(listener *connutil.PipeListener) {
return return
} }
const bufSize = 32 * 1024 const bufSize = 32 * 1024
go func(conn net.PacketConn) { go func() {
conn := conn
defer conn.Close() defer conn.Close()
buf := make([]byte, bufSize) buf := make([]byte, bufSize)
for { for {
@ -69,7 +73,7 @@ func serveUDPEcho(listener *connutil.PipeListener) {
return return
} }
} }
}(conn) }()
} }
} }
@ -121,7 +125,7 @@ var singleplexTCPConfig = client.RawConfig{
RemotePort: "9999", RemotePort: "9999",
LocalHost: "127.0.0.1", LocalHost: "127.0.0.1",
LocalPort: "9999", LocalPort: "9999",
BrowserSig: "safari", BrowserSig: "chrome",
} }
func generateClientConfigs(rawConfig client.RawConfig, state common.WorldState) (client.LocalConnConfig, client.RemoteConnConfig, client.AuthInfo) { func generateClientConfigs(rawConfig client.RawConfig, state common.WorldState) (client.LocalConnConfig, client.RemoteConnConfig, client.AuthInfo) {
@ -132,15 +136,18 @@ func generateClientConfigs(rawConfig client.RawConfig, state common.WorldState)
return lcl, rmt, auth return lcl, rmt, auth
} }
func basicServerState(ws common.WorldState) *server.State { func basicServerState(ws common.WorldState, db *os.File) *server.State {
var serverConfig = server.RawConfig{ var serverConfig = server.RawConfig{
ProxyBook: map[string][]string{"shadowsocks": {"tcp", "fake.com:9999"}, "openvpn": {"udp", "fake.com:9999"}}, ProxyBook: map[string][]string{"shadowsocks": {"tcp", "fake.com:9999"}, "openvpn": {"udp", "fake.com:9999"}},
BindAddr: []string{"fake.com:9999"}, BindAddr: []string{"fake.com:9999"},
BypassUID: [][]byte{bypassUID[:]}, BypassUID: [][]byte{bypassUID[:]},
RedirAddr: "fake.com:9999", RedirAddr: "fake.com:9999",
PrivateKey: privateKey, PrivateKey: privateKey,
KeepAlive: 15, AdminUID: nil,
CncMode: false, DatabasePath: db.Name(),
StreamTimeout: 300,
KeepAlive: 15,
CncMode: false,
} }
state, err := server.InitState(serverConfig, ws) state, err := server.InitState(serverConfig, ws)
if err != nil { if err != nil {
@ -179,9 +186,7 @@ func establishSession(lcc client.LocalConnConfig, rcc client.RemoteConnConfig, a
// whatever connection initiator (including a proper ck-client) // whatever connection initiator (including a proper ck-client)
netToCkServerD, ckServerListener := connutil.DialerListener(10 * 1024) netToCkServerD, ckServerListener := connutil.DialerListener(10 * 1024)
clientSeshMaker := func() *mux.Session { clientSeshMaker := func() *mux.Session {
ai := ai
quad := make([]byte, 4) quad := make([]byte, 4)
common.RandRead(ai.WorldState.Rand, quad) common.RandRead(ai.WorldState.Rand, quad)
ai.SessionId = binary.BigEndian.Uint32(quad) ai.SessionId = binary.BigEndian.Uint32(quad)
@ -202,12 +207,12 @@ func establishSession(lcc client.LocalConnConfig, rcc client.RemoteConnConfig, a
addrCh <- conn.LocalAddr().(*net.UDPAddr) addrCh <- conn.LocalAddr().(*net.UDPAddr)
return conn, err return conn, err
} }
go client.RouteUDP(acceptor, lcc.Timeout, rcc.Singleplex, clientSeshMaker) go client.RouteUDP(acceptor, lcc.Timeout, clientSeshMaker)
proxyToCkClientD = mDialer proxyToCkClientD = mDialer
} else { } else {
var proxyToCkClientL *connutil.PipeListener var proxyToCkClientL *connutil.PipeListener
proxyToCkClientD, proxyToCkClientL = connutil.DialerListener(10 * 1024) proxyToCkClientD, proxyToCkClientL = connutil.DialerListener(10 * 1024)
go client.RouteTCP(proxyToCkClientL, lcc.Timeout, rcc.Singleplex, clientSeshMaker) go client.RouteTCP(proxyToCkClientL, lcc.Timeout, clientSeshMaker)
} }
// set up server // set up server
@ -221,46 +226,43 @@ func establishSession(lcc client.LocalConnConfig, rcc client.RemoteConnConfig, a
return proxyToCkClientD, proxyFromCkServerL, netToCkServerD, redirFromCkServerL, nil return proxyToCkClientD, proxyFromCkServerL, netToCkServerD, redirFromCkServerL, nil
} }
func runEchoTest(t *testing.T, conns []net.Conn, msgLen int) { func runEchoTest(t *testing.T, conns []net.Conn, maxMsgLen int) {
var wg sync.WaitGroup var wg sync.WaitGroup
for _, conn := range conns { for _, conn := range conns {
wg.Add(1) wg.Add(1)
go func(conn net.Conn) { go func(conn net.Conn) {
defer wg.Done() testDataLen := rand.Intn(maxMsgLen)
testData := make([]byte, testDataLen)
testData := make([]byte, msgLen)
rand.Read(testData) rand.Read(testData)
// we cannot call t.Fatalf in concurrent contexts
n, err := conn.Write(testData) n, err := conn.Write(testData)
if n != msgLen { if n != testDataLen {
t.Errorf("written only %v, err %v", n, err) t.Fatalf("written only %v, err %v", n, err)
return
} }
recvBuf := make([]byte, msgLen) recvBuf := make([]byte, testDataLen)
_, err = io.ReadFull(conn, recvBuf) _, err = io.ReadFull(conn, recvBuf)
if err != nil { if err != nil {
t.Errorf("failed to read back: %v", err) t.Fatalf("failed to read back: %v", err)
return
} }
if !bytes.Equal(testData, recvBuf) { if !bytes.Equal(testData, recvBuf) {
t.Errorf("echoed data not correct") t.Fatalf("echoed data not correct")
return
} }
wg.Done()
}(conn) }(conn)
} }
wg.Wait() wg.Wait()
} }
func TestUDP(t *testing.T) { func TestUDP(t *testing.T) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
log.SetLevel(log.ErrorLevel) log.SetLevel(log.ErrorLevel)
worldState := common.WorldOfTime(time.Unix(10, 0)) worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := generateClientConfigs(basicUDPConfig, worldState) lcc, rcc, ai := generateClientConfigs(basicUDPConfig, worldState)
sta := basicServerState(worldState) sta := basicServerState(worldState, tmpDB)
proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta) proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta)
if err != nil { if err != nil {
@ -298,7 +300,6 @@ func TestUDP(t *testing.T) {
} }
}) })
const echoMsgLen = 1024
t.Run("user echo", func(t *testing.T) { t.Run("user echo", func(t *testing.T) {
go serveUDPEcho(proxyFromCkServerL) go serveUDPEcho(proxyFromCkServerL)
var conn [1]net.Conn var conn [1]net.Conn
@ -307,7 +308,7 @@ func TestUDP(t *testing.T) {
t.Error(err) t.Error(err)
} }
runEchoTest(t, conn[:], echoMsgLen) runEchoTest(t, conn[:], 1024)
}) })
} }
@ -316,20 +317,21 @@ func TestTCPSingleplex(t *testing.T) {
log.SetLevel(log.ErrorLevel) log.SetLevel(log.ErrorLevel)
worldState := common.WorldOfTime(time.Unix(10, 0)) worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := generateClientConfigs(singleplexTCPConfig, worldState) lcc, rcc, ai := generateClientConfigs(singleplexTCPConfig, worldState)
sta := basicServerState(worldState) var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
sta := basicServerState(worldState, tmpDB)
proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta) proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
const echoMsgLen = 1 << 16
go serveTCPEcho(proxyFromCkServerL) go serveTCPEcho(proxyFromCkServerL)
proxyConn1, err := proxyToCkClientD.Dial("", "") proxyConn1, err := proxyToCkClientD.Dial("", "")
if err != nil { if err != nil {
t.Fatal(err) t.Error(err)
} }
runEchoTest(t, []net.Conn{proxyConn1}, echoMsgLen) runEchoTest(t, []net.Conn{proxyConn1}, 65536)
user, err := sta.Panel.GetUser(ai.UID[:]) user, err := sta.Panel.GetUser(ai.UID[:])
if err != nil { if err != nil {
t.Fatalf("failed to fetch user: %v", err) t.Fatalf("failed to fetch user: %v", err)
@ -341,34 +343,42 @@ func TestTCPSingleplex(t *testing.T) {
proxyConn2, err := proxyToCkClientD.Dial("", "") proxyConn2, err := proxyToCkClientD.Dial("", "")
if err != nil { if err != nil {
t.Fatal(err) t.Error(err)
} }
runEchoTest(t, []net.Conn{proxyConn2}, echoMsgLen) runEchoTest(t, []net.Conn{proxyConn2}, 65536)
if user.NumSession() != 2 { if user.NumSession() != 2 {
t.Error("no extra session were made on second connection establishment") t.Error("no extra session were made on second connection establishment")
} }
// Both conns should work // Both conns should work
runEchoTest(t, []net.Conn{proxyConn1, proxyConn2}, echoMsgLen) runEchoTest(t, []net.Conn{proxyConn1, proxyConn2}, 65536)
proxyConn1.Close() proxyConn1.Close()
assert.Eventually(t, func() bool { retries := 0
return user.NumSession() == 1 retry:
}, time.Second, 10*time.Millisecond, "first session was not closed on connection close") time.Sleep(delayBeforeTestingConnClose)
if user.NumSession() != 1 {
retries++
if retries > connCloseRetries {
t.Error("first session was not closed on connection close")
} else {
goto retry
}
}
// conn2 should still work // conn2 should still work
runEchoTest(t, []net.Conn{proxyConn2}, echoMsgLen) runEchoTest(t, []net.Conn{proxyConn2}, 65536)
var conns [numConns]net.Conn var conns [numConns]net.Conn
for i := 0; i < numConns; i++ { for i := 0; i < numConns; i++ {
conns[i], err = proxyToCkClientD.Dial("", "") conns[i], err = proxyToCkClientD.Dial("", "")
if err != nil { if err != nil {
t.Fatal(err) t.Error(err)
} }
} }
runEchoTest(t, conns[:], echoMsgLen) runEchoTest(t, conns[:], 65536)
} }
@ -377,7 +387,9 @@ func TestTCPMultiplex(t *testing.T) {
worldState := common.WorldOfTime(time.Unix(10, 0)) worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := generateClientConfigs(basicTCPConfig, worldState) lcc, rcc, ai := generateClientConfigs(basicTCPConfig, worldState)
sta := basicServerState(worldState) var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
sta := basicServerState(worldState, tmpDB)
proxyToCkClientD, proxyFromCkServerL, netToCkServerD, redirFromCkServerL, err := establishSession(lcc, rcc, ai, sta) proxyToCkClientD, proxyFromCkServerL, netToCkServerD, redirFromCkServerL, err := establishSession(lcc, rcc, ai, sta)
if err != nil { if err != nil {
@ -416,7 +428,6 @@ func TestTCPMultiplex(t *testing.T) {
} }
}) })
const echoMsgLen = 16384
t.Run("user echo", func(t *testing.T) { t.Run("user echo", func(t *testing.T) {
go serveTCPEcho(proxyFromCkServerL) go serveTCPEcho(proxyFromCkServerL)
var conns [numConns]net.Conn var conns [numConns]net.Conn
@ -427,7 +438,7 @@ func TestTCPMultiplex(t *testing.T) {
} }
} }
runEchoTest(t, conns[:], echoMsgLen) runEchoTest(t, conns[:], 65536)
}) })
t.Run("redir echo", func(t *testing.T) { t.Run("redir echo", func(t *testing.T) {
@ -439,7 +450,7 @@ func TestTCPMultiplex(t *testing.T) {
t.Error(err) t.Error(err)
} }
} }
runEchoTest(t, conns[:], echoMsgLen) runEchoTest(t, conns[:], 65536)
}) })
} }
@ -451,8 +462,11 @@ func TestClosingStreamsFromProxy(t *testing.T) {
clientConfig := clientConfig clientConfig := clientConfig
clientConfigName := clientConfigName clientConfigName := clientConfigName
t.Run(clientConfigName, func(t *testing.T) { t.Run(clientConfigName, func(t *testing.T) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
lcc, rcc, ai := generateClientConfigs(clientConfig, worldState) lcc, rcc, ai := generateClientConfigs(clientConfig, worldState)
sta := basicServerState(worldState) sta := basicServerState(worldState, tmpDB)
proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta) proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -464,10 +478,17 @@ func TestClosingStreamsFromProxy(t *testing.T) {
serverConn, _ := proxyFromCkServerL.Accept() serverConn, _ := proxyFromCkServerL.Accept()
serverConn.Close() serverConn.Close()
assert.Eventually(t, func() bool { retries := 0
_, err := clientConn.Read(make([]byte, 16)) retry:
return err != nil time.Sleep(delayBeforeTestingConnClose)
}, time.Second, 10*time.Millisecond, "closing stream on server side is not reflected to the client") if _, err := clientConn.Read(make([]byte, 16)); err == nil {
retries++
if retries > connCloseRetries {
t.Errorf("closing stream on server side is not reflected to the client: %v", err)
} else {
goto retry
}
}
}) })
t.Run("closing from client", func(t *testing.T) { t.Run("closing from client", func(t *testing.T) {
@ -477,10 +498,17 @@ func TestClosingStreamsFromProxy(t *testing.T) {
serverConn, _ := proxyFromCkServerL.Accept() serverConn, _ := proxyFromCkServerL.Accept()
clientConn.Close() clientConn.Close()
assert.Eventually(t, func() bool { retries := 0
_, err := serverConn.Read(make([]byte, 16)) retry:
return err != nil time.Sleep(delayBeforeTestingConnClose)
}, time.Second, 10*time.Millisecond, "closing stream on client side is not reflected to the server") if _, err := serverConn.Read(make([]byte, 16)); err == nil {
retries++
if retries > 3 {
t.Errorf("closing stream on client side is not reflected to the server: %v", err)
} else {
goto retry
}
}
}) })
t.Run("send then close", func(t *testing.T) { t.Run("send then close", func(t *testing.T) {
@ -510,18 +538,19 @@ func TestClosingStreamsFromProxy(t *testing.T) {
} }
} }
func BenchmarkIntegration(b *testing.B) { func BenchmarkThroughput(b *testing.B) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
log.SetLevel(log.ErrorLevel) log.SetLevel(log.ErrorLevel)
worldState := common.WorldOfTime(time.Unix(10, 0)) worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := generateClientConfigs(basicTCPConfig, worldState) lcc, rcc, ai := generateClientConfigs(basicTCPConfig, worldState)
sta := basicServerState(worldState) sta := basicServerState(worldState, tmpDB)
const bufSize = 16 * 1024 const bufSize = 16 * 1024
encryptionMethods := map[string]byte{ encryptionMethods := map[string]byte{
"plain": mux.EncryptionMethodPlain, "plain": mux.E_METHOD_PLAIN,
"chacha20-poly1305": mux.EncryptionMethodChaha20Poly1305, "chacha20-poly1305": mux.E_METHOD_CHACHA20_POLY1305,
"aes-256-gcm": mux.EncryptionMethodAES256GCM, "aes-gcm": mux.E_METHOD_AES_GCM,
"aes-128-gcm": mux.EncryptionMethodAES128GCM,
} }
for name, method := range encryptionMethods { for name, method := range encryptionMethods {
@ -532,7 +561,7 @@ func BenchmarkIntegration(b *testing.B) {
b.Fatal(err) b.Fatal(err)
} }
b.Run("single stream bandwidth", func(b *testing.B) { b.Run("single stream", func(b *testing.B) {
more := make(chan int, 10) more := make(chan int, 10)
go func() { go func() {
// sender // sender
@ -556,19 +585,6 @@ func BenchmarkIntegration(b *testing.B) {
} }
}) })
b.Run("single stream latency", func(b *testing.B) {
clientConn, _ := proxyToCkClientD.Dial("", "")
buf := []byte{1}
clientConn.Write(buf)
serverConn, _ := proxyFromCkServerL.Accept()
serverConn.Read(buf)
b.ResetTimer()
for i := 0; i < b.N; i++ {
clientConn.Write(buf)
serverConn.Read(buf)
}
})
}) })
} }

View File

@ -1,12 +1,15 @@
#!/usr/bin/env bash go get github.com/mitchellh/gox
set -eu
go install github.com/mitchellh/gox@latest
mkdir -p release mkdir -p release
rm -f ./release/* read -rp "Cleaning $PWD/release directory. Proceed? [y/n]" res
if [ ! "$res" == "y" ]; then
echo "Abort"
exit 1
fi
rm -rf ./release/*
if [ -z "$v" ]; then if [ -z "$v" ]; then
echo "Version number cannot be null. Run with v=[version] release.sh" echo "Version number cannot be null. Run with v=[version] release.sh"
@ -14,24 +17,20 @@ if [ -z "$v" ]; then
fi fi
output="{{.Dir}}-{{.OS}}-{{.Arch}}-$v" output="{{.Dir}}-{{.OS}}-{{.Arch}}-$v"
osarch="!darwin/arm !darwin/386" osarch="!darwin/arm !darwin/arm64 !darwin/386"
echo "Compiling:" echo "Compiling:"
os="windows linux darwin" os="windows linux darwin"
arch="amd64 386 arm arm64 mips mips64 mipsle mips64le" arch="amd64 386 arm arm64 mips mips64 mipsle mips64le"
pushd cmd/ck-client pushd cmd/ck-client || exit 1
CGO_ENABLED=0 gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output" gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output"
CGO_ENABLED=0 GOOS="linux" GOARCH="mips" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mips_softfloat-"${v}" GOOS="linux" GOARCH="mips" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mips_softfloat-"${v}"
CGO_ENABLED=0 GOOS="linux" GOARCH="mipsle" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mipsle_softfloat-"${v}" GOOS="linux" GOARCH="mipsle" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mipsle_softfloat-"${v}"
mv ck-client-* ../../release mv ck-client-* ../../release
popd
os="linux" os="linux"
arch="amd64 386 arm arm64" arch="amd64 386 arm arm64"
pushd cmd/ck-server pushd ../ck-server || exit 1
CGO_ENABLED=0 gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output" gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output"
mv ck-server-* ../../release mv ck-server-* ../../release
popd
sha256sum release/*

View File

@ -1,13 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended"
],
"packageRules": [
{
"packagePatterns": ["*"],
"excludePackagePatterns": ["utls"],
"enabled": false
}
]
}