Compare commits

..

No commits in common. "master" and "v2.2.3" have entirely different histories.

90 changed files with 2975 additions and 3919 deletions

View File

@ -1,91 +0,0 @@
# CI workflow: run the unit tests with coverage on three operating systems,
# then run cross-version compatibility tests of the freshly built client and
# server against the last released Docker image, using iperf3 as the proxied
# upstream service.
name: Build and test
on: [ push ]
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ ubuntu-latest, macos-latest, windows-latest ]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '^1.24' # The Go version to download (if necessary) and use.
      - run: go test -race -coverprofile coverage.txt -coverpkg ./... -covermode atomic ./...
      - uses: codecov/codecov-action@v4
        with:
          files: coverage.txt
          token: ${{ secrets.CODECOV_TOKEN }}
  compat-test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        encryption-method: [ plain, chacha20-poly1305 ]
        num-conn: [ 0, 1, 4 ]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '^1.24'
      - name: Build Cloak
        run: make
      # Write matching client/server configs; the matrix values are spliced
      # into the client config so every encryption-method x num-conn
      # combination is exercised.
      - name: Create configs
        run: |
          mkdir config
          cat << EOF > config/ckclient.json
          {
            "Transport": "direct",
            "ProxyMethod": "iperf",
            "EncryptionMethod": "${{ matrix.encryption-method }}",
            "UID": "Q4GAXHVgnDLXsdTpw6bmoQ==",
            "PublicKey": "4dae/bF43FKGq+QbCc5P/E/MPM5qQeGIArjmJEHiZxc=",
            "ServerName": "cloudflare.com",
            "BrowserSig": "firefox",
            "NumConn": ${{ matrix.num-conn }}
          }
          EOF
          cat << EOF > config/ckserver.json
          {
            "ProxyBook": {
              "iperf": [
                "tcp",
                "127.0.0.1:5201"
              ]
            },
            "BindAddr": [
              ":8443"
            ],
            "BypassUID": [
              "Q4GAXHVgnDLXsdTpw6bmoQ=="
            ],
            "RedirAddr": "cloudflare.com",
            "PrivateKey": "AAaskZJRPIAbiuaRLHsvZPvE6gzOeSjg+ZRg1ENau0Y="
          }
          EOF
      - name: Start iperf3 server
        run: docker run -d --name iperf-server --network host ajoergensen/iperf3:latest --server
      - name: Test new client against old server
        run: |
          docker run -d --name old-cloak-server --network host -v $PWD/config:/go/Cloak/config cbeuw/cloak:latest build/ck-server -c config/ckserver.json --verbosity debug
          build/ck-client -c config/ckclient.json -s 127.0.0.1 -p 8443 --verbosity debug | tee new-cloak-client.log &
          docker run --network host ajoergensen/iperf3:latest --client 127.0.0.1 -p 1984
          docker stop old-cloak-server
      - name: Test old client against new server
        run: |
          build/ck-server -c config/ckserver.json --verbosity debug | tee new-cloak-server.log &
          docker run -d --name old-cloak-client --network host -v $PWD/config:/go/Cloak/config cbeuw/cloak:latest build/ck-client -c config/ckclient.json -s 127.0.0.1 -p 8443 --verbosity debug
          docker run --network host ajoergensen/iperf3:latest --client 127.0.0.1 -p 1984
          docker stop old-cloak-client
      # Always collect container logs so failures can be diagnosed from the
      # uploaded artifact.
      - name: Dump docker logs
        if: always()
        run: |
          docker container logs iperf-server > iperf-server.log
          docker container logs old-cloak-server > old-cloak-server.log
          docker container logs old-cloak-client > old-cloak-client.log
      - name: Upload logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.encryption-method }}-${{ matrix.num-conn }}-conn-logs
          path: ./*.log

View File

@ -1,50 +0,0 @@
# Release workflow: triggered by any v* tag. One job builds the release
# binaries and publishes them as a GitHub release; a second job builds and
# pushes a multi-tag image to Docker Hub.
on:
  push:
    tags:
      - 'v*'
name: Create Release
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Build
        run: |
          export PATH=${PATH}:`go env GOPATH`/bin
          v=${GITHUB_REF#refs/*/} ./release.sh
      - name: Release
        uses: softprops/action-gh-release@v1
        with:
          files: release/*
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  build-docker:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      # Derive image tags/labels (branch, PR, semver) from the git ref.
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            cbeuw/cloak
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

6
.gitignore vendored
View File

@ -1,6 +0,0 @@
corpus/
suppressions/
crashers/
*.zip
.idea/
build/

10
.travis.yml Normal file
View File

@ -0,0 +1,10 @@
# Legacy Travis CI configuration: run the Go test suite with the race
# detector and coverage enabled, then upload the report to Codecov.
language: go
go:
  - "1.14"
script:
  - go test -race -coverprofile=coverage.txt -coverpkg=./... -covermode=atomic ./...
after_success:
  - bash <(curl -s https://codecov.io/bash)

View File

@ -1,5 +0,0 @@
# Build image for Cloak: clone the upstream repository at image build time
# and compile with the project Makefile. NOTE(review): cloning inside the
# image (rather than COPYing the local context) means every build fetches
# the current upstream master; binaries are presumably produced under
# build/ by `make` — confirm against the Makefile.
FROM golang:latest
RUN git clone https://github.com/cbeuw/Cloak.git
# WORKDIR is relative to the golang image's default working directory (/go).
WORKDIR Cloak
RUN make

223
README.md
View File

@ -1,239 +1,148 @@
[![Build Status](https://github.com/cbeuw/Cloak/workflows/Build%20and%20test/badge.svg)](https://github.com/cbeuw/Cloak/actions) [![Build Status](https://travis-ci.org/cbeuw/Cloak.svg?branch=master)](https://travis-ci.org/cbeuw/Cloak)
[![codecov](https://codecov.io/gh/cbeuw/Cloak/branch/master/graph/badge.svg)](https://codecov.io/gh/cbeuw/Cloak) [![codecov](https://codecov.io/gh/cbeuw/Cloak/branch/master/graph/badge.svg)](https://codecov.io/gh/cbeuw/Cloak)
[![Go Report Card](https://goreportcard.com/badge/github.com/cbeuw/Cloak)](https://goreportcard.com/report/github.com/cbeuw/Cloak) [![Go Report Card](https://goreportcard.com/badge/github.com/cbeuw/Cloak)](https://goreportcard.com/report/github.com/cbeuw/Cloak)
[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=SAUYKGSREP8GL&source=url) [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=SAUYKGSREP8GL&source=url)
<p align="center">
<img src="https://user-images.githubusercontent.com/7034308/96387206-3e214100-1198-11eb-8917-689d7c56e0cd.png" />
<img src="https://user-images.githubusercontent.com/7034308/155593583-f22bcfe2-ac22-4afb-9288-1a0e8a791a0d.svg" />
</p>
<p align="center"> ![image](https://user-images.githubusercontent.com/7034308/65361318-0a719180-dbfb-11e9-96de-56d1023856f0.png)
<img src="https://user-images.githubusercontent.com/7034308/155629720-54dd8758-ec98-4fed-b603-623f0ad83b6c.svg" />
</p>
Cloak is a [pluggable transport](https://datatracker.ietf.org/meeting/103/materials/slides-103-pearg-pt-slides-01) that enhances ![Cloak](https://user-images.githubusercontent.com/7034308/65385852-7eab5280-dd2b-11e9-8887-db449b250e2a.png)
traditional proxy tools like OpenVPN to evade [sophisticated censorship](https://en.wikipedia.org/wiki/Deep_packet_inspection) and [data discrimination](https://en.wikipedia.org/wiki/Net_bias).
Cloak is not a standalone proxy program. Rather, it works by masquerading proxied traffic as normal web browsing Cloak is a universal pluggable transport that cryptographically obfuscates proxy traffic as legitimate HTTPS traffic and disguises the proxy server as a normal web server to evade internet censorship.
activities. In contrast to traditional tools which have very prominent traffic fingerprints and can be blocked by simple filtering rules,
it's very difficult to precisely target Cloak with little false positives. This increases the collateral damage to censorship actions as
attempts to block Cloak could also damage services the censor state relies on.
To any third party observer, a host running Cloak server is indistinguishable from an innocent web server. Both while Cloak works by masquerading proxy traffic as normal web browsing traffic. This increases the collateral damage to censorship actions and therefore make it very difficult, if not impossible, for censors to selectively block censorship evasion tools and proxy servers without affecting services that the state may also heavily rely on.
passively observing traffic flow to and from the server, as well as while actively probing the behaviours of a Cloak
server. This is achieved through the use of a series
of [cryptographic steganography techniques](https://github.com/cbeuw/Cloak/wiki/Steganography-and-encryption).
Cloak can be used in conjunction with any proxy program that tunnels traffic through TCP or Cloak eliminates "fingerprints" exposed by traditional proxy protocol designs which can be identified by adversaries through deep packet inspection. If a non-Cloak program or an unauthorised Cloak user (such as an adversary's prober) attempts to connect to Cloak server, it will serve as a transparent proxy between said machine and an ordinary website, so that to any unauthorised third party, a host running Cloak server is indistinguishable from an innocent web server. This is achieved through the use of a series of [cryptographic steganography techniques](https://github.com/cbeuw/Cloak/wiki/Steganography-and-encryption).
UDP, such as Shadowsocks, OpenVPN and Tor. Multiple proxy servers can be running on the same server host and
Cloak server will act as a reverse proxy, bridging clients with their desired proxy end.
Cloak multiplexes traffic through multiple underlying TCP connections which reduces head-of-line blocking and eliminates Since Cloak is transparent, it can be used in conjunction with any proxy software that tunnels traffic through TCP or UDP, such as Shadowsocks, OpenVPN and Tor. Multiple proxy servers can be running on the same server host machine and Cloak server will act as a reverse proxy, bridging clients with their desired proxy end.
TCP handshake overhead. This also makes the traffic pattern more similar to real websites.
Cloak provides multi-user support, allowing multiple clients to connect to the proxy server on the same port (443 by Cloak multiplexes traffic through multiple underlying TCP connections which reduces head-of-line blocking and eliminates TCP handshake overhead. This also makes the traffic pattern more similar to real websites.
default). It also provides traffic management features such as usage credit and bandwidth control. This allows a proxy
server to serve multiple users even if the underlying proxy software wasn't designed for multiple users
Cloak also supports tunneling through an intermediary CDN server such as Amazon Cloudfront. Such services are so widely used, Cloak provides multi-user support, allowing multiple clients to connect to the proxy server on the same port (443 by default). It also provides traffic management features such as usage credit and bandwidth control. This allows a proxy server to serve multiple users even if the underlying proxy software wasn't designed for multiple users
attempts to disrupt traffic to them can lead to very high collateral damage for the censor.
## Quick Start Cloak has two modes of [_Transport_](https://github.com/cbeuw/Cloak/wiki/CDN-mode): `direct` and `CDN`. Clients can either connect to the host running Cloak server directly, or it can instead connect to a CDN edge server, which may be used by many legitimate websites as well, thus further increases the collateral damage to censorship.
To quickly deploy Cloak with Shadowsocks on a server, you can run This project was evolved from [GoQuiet](https://github.com/cbeuw/GoQuiet). Through multiplexing, Cloak provides a significant reduction in webpage loading time compared to GoQuiet (from 10% to 50%+, depending on the amount of content on the webpage, see [benchmarks](https://github.com/cbeuw/Cloak/wiki/Web-page-loading-benchmarks)).
this [script](https://github.com/HirbodBehnam/Shadowsocks-Cloak-Installer/blob/master/Cloak2-Installer.sh) written by
@HirbodBehnam
Table of Contents Table of Contents
================= =================
* [Quick Start](#quick-start) * [Quick Start](#quick-start)
* [Build](#build) * [Build](#build)
* [Configuration](#configuration) * [Configuration](#configuration)
* [Server](#server) * [Server](#server)
* [Client](#client) * [Client](#client)
* [Setup](#setup) * [Setup](#setup)
* [Server](#server-1) * [For the administrator of the server](#for-the-administrator-of-the-server)
* [To add users](#to-add-users) * [To add users](#to-add-users)
* [Unrestricted users](#unrestricted-users) * [Unrestricted users](#unrestricted-users)
* [Users subject to bandwidth and credit controls](#users-subject-to-bandwidth-and-credit-controls) * [Users subject to bandwidth and credit controls](#users-subject-to-bandwidth-and-credit-controls)
* [Client](#client-1) * [Instructions for clients](#instructions-for-clients)
* [Support me](#support-me) * [Support me](#support-me)
## Quick Start
To quickly deploy Cloak with Shadowsocks on a server, you can run this [script](https://github.com/HirbodBehnam/Shadowsocks-Cloak-Installer/blob/master/Cloak2-Installer.sh) written by @HirbodBehnam
## Build ## Build
If you are not using the experimental go mod support, make sure you `go get` the following dependencies:
```bash
git clone https://github.com/cbeuw/Cloak
cd Cloak
go get ./...
make
``` ```
go.etcd.io/bbolt
Built binaries will be in `build` folder. github.com/cbeuw/connutil
github.com/juju/ratelimit
github.com/gorilla/mux
github.com/gorilla/websocket
github.com/sirupsen/logrus
github.com/stretchr/testify
golang.org/x/crypto
github.com/refraction-networking/utls
```
Then run `make client` or `make server`. Output binary will be in `build` folder.
## Configuration ## Configuration
Examples of configuration files can be found under `example_config` folder.
### Server ### Server
`RedirAddr` is the redirection address when the incoming traffic is not from a Cloak client. It should be the IP and port of a webserver that responds to HTTPS (eg: `localhost:10443`), preferably with a real SSL certificate.
`RedirAddr` is the redirection address when the incoming traffic is not from a Cloak client. Ideally it should be set to `BindAddr` is a list of addresses Cloak will bind and listen to (e.g. `[":443",":80"]` to listen to port 443 and 80 on all interfaces)
a major website allowed by the censor (e.g. `www.bing.com`)
`BindAddr` is a list of addresses Cloak will bind and listen to (e.g. `[":443",":80"]` to listen to port 443 and 80 on `ProxyBook` is an object whose key is the name of the ProxyMethod used on the client-side (case-sensitive). Its value is an array whose first element is the protocol and the second element is an `IP:PORT` string of the upstream proxy server that Cloak will forward the traffic to.
all interfaces)
`ProxyBook` is an object whose key is the name of the ProxyMethod used on the client-side (case-sensitive). Its value is
an array whose first element is the protocol, and the second element is an `IP:PORT` string of the upstream proxy server
that Cloak will forward the traffic to.
Example: Example:
```json ```json
{ {
"ProxyBook": { "ProxyBook": {
"shadowsocks": [ "shadowsocks": [ "tcp", "localhost:51443" ],
"tcp", "openvpn": [ "tcp", "localhost:12345" ]
"localhost:51443" }
],
"openvpn": [
"tcp",
"localhost:12345"
]
}
} }
``` ```
`PrivateKey` is the static curve25519 Diffie-Hellman private key encoded in base64. `PrivateKey` is the static curve25519 Diffie-Hellman private key encoded in base64.
`AdminUID` is the UID of the admin user in base64.
`BypassUID` is a list of UIDs that are authorised without any bandwidth or credit limit restrictions `BypassUID` is a list of UIDs that are authorised without any bandwidth or credit limit restrictions
`AdminUID` is the UID of the admin user in base64. You can leave this empty if you only ever add users to `BypassUID`. `DatabasePath` is the path to userinfo.db. If userinfo.db doesn't exist in this directory, Cloak will create one automatically. **If Cloak is started as a Shadowsocks plugin and Shadowsocks is started with its working directory as / (e.g. starting ss-server with systemctl), you need to set this field as an absolute path to a desired folder. If you leave it as default then Cloak will attempt to create userinfo.db under /, which it doesn't have the permission to do so and will raise an error. See Issue #13.**
`DatabasePath` is the path to `userinfo.db`, which is used to store user usage information and restrictions. Cloak will `KeepAlive` is the number of seconds to tell the OS to wait after no activity before sending TCP KeepAlive probes to the upstream proxy server. Zero or negative value disables it. Default is 0 (disabled).
create the file automatically if it doesn't exist. You can leave this empty if you only ever add users to `BypassUID`.
This field also has no effect if `AdminUID` isn't a valid UID or is empty.
`KeepAlive` is the number of seconds to tell the OS to wait after no activity before sending TCP KeepAlive probes to the `StreamTimeout` is the number of seconds of no sent data after which the incoming Cloak client connection will be terminated. Default is 300 seconds.
upstream proxy server. Zero or negative value disables it. Default is 0 (disabled).
### Client ### Client
`UID` is your UID in base64. `UID` is your UID in base64.
`Transport` can be either `direct` or `CDN`. If the server host wishes you to connect to it directly, use `direct`. If `Transport` can be either `direct` or `CDN`. If the server host wishes you to connect to it directly, use `direct`. If instead a CDN is used, use `CDN`.
instead a CDN is used, use `CDN`.
`PublicKey` is the static curve25519 public key in base64, given by the server admin. `PublicKey` is the static curve25519 public key, given by the server admin.
`ProxyMethod` is the name of the proxy method you are using. This must match one of the entries in the `ProxyMethod` is the name of the proxy method you are using.
server's `ProxyBook` exactly.
`EncryptionMethod` is the name of the encryption algorithm you want Cloak to use. Options are `plain`, `aes-256-gcm` ( `EncryptionMethod` is the name of the encryption algorithm you want Cloak to use. Note: Cloak isn't intended to provide transport security. The point of encryption is to hide fingerprints of proxy protocols and render the payload statistically random-like. If the proxy protocol is already fingerprint-less, which is the case for Shadowsocks, this field can be left as `plain`. Options are `plain`, `aes-gcm` and `chacha20-poly1305`.
synonymous to `aes-gcm`), `aes-128-gcm`, and `chacha20-poly1305`. Note: Cloak isn't intended to provide transport
security. The point of encryption is to hide fingerprints of proxy protocols and render the payload statistically
random-like. **You may only leave it as `plain` if you are certain that your underlying proxy tool already provides BOTH
encryption and authentication (via AEAD or similar techniques).**
`ServerName` is the domain you want to make your ISP or firewall _think_ you are visiting. Ideally it should `ServerName` is the domain you want to make your ISP or firewall think you are visiting.
match `RedirAddr` in the server's configuration, a major site the censor allows, but it doesn't have to. Use `random` to randomize the server name for every connection made.
`AlternativeNames` is an array used alongside `ServerName` to shuffle between different ServerNames for every new `NumConn` is the amount of underlying TCP connections you want to use. The default of 4 should be appropriate for most people. Setting it too high will hinder the performance. Setting it to 0 will disable connection multiplexing and each TCP connection will spawn a separate short lived session that will be closed after it is terminated. This makes it behave like GoQuiet. This maybe useful for people with unstable connections.
connection. **This may conflict with `CDN` Transport mode** if the CDN provider prohibits domain fronting and rejects
the alternative domains.
Example: `BrowserSig` is the browser you want to **appear** to be using. It's not relevant to the browser you are actually using. Currently, `chrome` and `firefox` are supported.
```json `KeepAlive` is the number of seconds to tell the OS to wait after no activity before sending TCP KeepAlive probes to the Cloak server. Zero or negative value disables it. Default is 0 (disabled). Warning: Enabling it might make your server more detectable as a proxy, but it will make the Cloak client detect internet interruption more quickly.
{
"ServerName": "bing.com",
"AlternativeNames": ["cloudflare.com", "github.com"]
}
```
`CDNOriginHost` is the domain name of the _origin_ server (i.e. the server running Cloak) under `CDN` mode. This only `StreamTimeout` is the number of seconds of no sent data after which the incoming proxy connection will be terminated. Default is 300 seconds.
has effect when `Transport` is set to `CDN`. If unset, it will default to the remote hostname supplied via the
commandline argument (in standalone mode), or by Shadowsocks (in plugin mode). After a TLS session is established with
the CDN server, this domain name will be used in the `Host` header of the HTTP request to ask the CDN server to
establish a WebSocket connection with this host.
`CDNWsUrlPath` is the url path used to build websocket request sent under `CDN` mode, and also only has effect
when `Transport` is set to `CDN`. If unset, it will default to "/". This option is used to build the first line of the
HTTP request after a TLS session is extablished. It's mainly for a Cloak server behind a reverse proxy, while only
requests under specific url path are forwarded.
`NumConn` is the amount of underlying TCP connections you want to use. The default of 4 should be appropriate for most
people. Setting it too high will hinder the performance. Setting it to 0 will disable connection multiplexing and each
TCP connection will spawn a separate short-lived session that will be closed after it is terminated. This makes it
behave like GoQuiet. This maybe useful for people with unstable connections.
`BrowserSig` is the browser you want to **appear** to be using. It's not relevant to the browser you are actually using.
Currently, `chrome`, `firefox` and `safari` are supported.
`KeepAlive` is the number of seconds to tell the OS to wait after no activity before sending TCP KeepAlive probes to the
Cloak server. Zero or negative value disables it. Default is 0 (disabled). Warning: Enabling it might make your server
more detectable as a proxy, but it will make the Cloak client detect internet interruption more quickly.
`StreamTimeout` is the number of seconds of Cloak waits for an incoming connection from a proxy program to send any
data, after which the connection will be closed by Cloak. Cloak will not enforce any timeout on TCP connections after it
is established.
## Setup ## Setup
### For the administrator of the server
### Server 0. Set up the underlying proxy server.
0. Install at least one underlying proxy server (e.g. OpenVPN, Shadowsocks).
1. Download [the latest release](https://github.com/cbeuw/Cloak/releases) or clone and build this repo. 1. Download [the latest release](https://github.com/cbeuw/Cloak/releases) or clone and build this repo.
2. Run `ck-server -key`. The **public** key should be given to users, the **private** key should be kept secret. 2. Run ck-server -k. The base64 string before the comma is the **public** key to be given to users, the one after the comma is the **private** key to be kept secret
3. (Skip if you only want to add unrestricted users) Run `ck-server -uid`. The new UID will be used as `AdminUID`. 3. Run `ck-server -u`. This will be used as the AdminUID
4. Copy example_config/ckserver.json into a desired location. Change `PrivateKey` to the private key you just obtained; 4. Copy example_config/ckserver.json into a desired location. Change `PrivateKey` to the private key you just obtained; change `AdminUID` to the UID you just obtained.
change `AdminUID` to the UID you just obtained. 5. Configure your underlying proxy server so that they all listen on localhost. Edit `ProxyBook` in the configuration file accordingly
5. Configure your underlying proxy server so that they all listen on localhost. Edit `ProxyBook` in the configuration 6. [Configure the proxy program.](https://github.com/cbeuw/Cloak/wiki/Underlying-proxy-configuration-guides) Run `sudo ck-server -c <path to ckserver.json>`. ck-server needs root privilege because it binds to a low numbered port (443). Alternatively you can follow https://superuser.com/a/892391 to avoid granting ck-server root privilege unnecessarily.
file accordingly
6. [Configure the proxy program.](https://github.com/cbeuw/Cloak/wiki/Underlying-proxy-configuration-guides)
Run `sudo ck-server -c <path to ckserver.json>`. ck-server needs root privilege because it binds to a low numbered
port (443). Alternatively you can follow https://superuser.com/a/892391 to avoid granting ck-server root privilege
unnecessarily.
#### To add users #### To add users
##### Unrestricted users ##### Unrestricted users
Run `ck-server -u` and add the UID into the `BypassUID` field in `ckserver.json`
Run `ck-server -uid` and add the UID into the `BypassUID` field in `ckserver.json`
##### Users subject to bandwidth and credit controls ##### Users subject to bandwidth and credit controls
1. On your client, run `ck-client -s <IP of the server> -l <A local port> -a <AdminUID> -c <path-to-ckclient.json>` to enter admin mode
0. First make sure you have `AdminUID` generated and set in `ckserver.json`, along with a path to `userinfo.db` 2. Visit https://cbeuw.github.io/Cloak-panel (Note: this is a static site, there is no backend and all data entered into this site are processed between your browser and the Cloak API endpoint you specified. Alternatively you can download the repo at https://github.com/cbeuw/Cloak-panel and host it on your own web server).
in `DatabasePath` (Cloak will create this file for you if it didn't already exist). 3. Type in 127.0.0.1:<the port you entered in step 1> as the API Base, and click `List`.
1. On your client, run `ck-client -s <IP of the server> -l <A local port> -a <AdminUID> -c <path-to-ckclient.json>` to
enter admin mode
2. Visit https://cbeuw.github.io/Cloak-panel (Note: this is a pure-js static site, there is no backend and all data
entered into this site are processed between your browser and the Cloak API endpoint you specified. Alternatively you
can download the repo at https://github.com/cbeuw/Cloak-panel and open `index.html` in a browser. No web server is
required).
3. Type in `127.0.0.1:<the port you entered in step 1>` as the API Base, and click `List`.
4. You can add in more users by clicking the `+` panel 4. You can add in more users by clicking the `+` panel
Note: the user database is persistent as it's in-disk. You don't need to add the users again each time you start Note: the user database is persistent as it's in-disk. You don't need to add the users again each time you start ck-server.
ck-server.
### Client
### Instructions for clients
**Android client is available here: https://github.com/cbeuw/Cloak-android** **Android client is available here: https://github.com/cbeuw/Cloak-android**
0. Install the underlying proxy client corresponding to what the server has. 0. Install and configure the proxy client based on the server
1. Download [the latest release](https://github.com/cbeuw/Cloak/releases) or clone and build this repo. 1. Download [the latest release](https://github.com/cbeuw/Cloak/releases) or clone and build this repo.
2. Obtain the public key and your UID from the administrator of your server 2. Obtain the public key and your UID from the administrator of your server
3. Copy `example_config/ckclient.json` into a location of your choice. Enter the `UID` and `PublicKey` you have 3. Copy example_config/ckclient.json into a location of your choice. Enter the `UID` and `PublicKey` you have obtained. Set `ProxyMethod` to match exactly the corresponding entry in `ProxyBook` on the server end
obtained. Set `ProxyMethod` to match exactly the corresponding entry in `ProxyBook` on the server end 4. [Configure the proxy program.](https://github.com/cbeuw/Cloak/wiki/Underlying-proxy-configuration-guides) Run `ck-client -c <path to ckclient.json> -s <ip of your server>`
4. [Configure the proxy program.](https://github.com/cbeuw/Cloak/wiki/Underlying-proxy-configuration-guides)
Run `ck-client -c <path to ckclient.json> -s <ip of your server>`
## Support me ## Support me
If you find this project useful, you can visit my [merch store](https://www.redbubble.com/people/cbeuw/explore); alternatively you can donate directly to me
If you find this project useful, you can visit my [merch store](https://www.redbubble.com/people/cbeuw/explore);
alternatively you can donate directly to me
[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=SAUYKGSREP8GL&source=url) [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=SAUYKGSREP8GL&source=url)

84
azure-pipelines.yml Normal file
View File

@ -0,0 +1,84 @@
# Azure Pipelines: on v* tags (master excluded from branch triggers),
# cross-compile release binaries for the client and server with gox, stage
# them as artifacts, and create a GitHub release.
# https://docs.microsoft.com/azure/devops/pipelines/languages/go
trigger:
  tags:
    include:
      - refs/tags/v*
  branches:
    exclude:
      - master
pool:
  vmImage: 'ubuntu-latest'
variables:
  GOBIN: '$(GOPATH)/bin' # Go binaries path
  GOROOT: '$(Agent.BuildDirectory)/go' # Go installation path
  GOPATH: '$(Agent.BuildDirectory)/gopath' # Go workspace path
  modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code
steps:
  # Lay out a classic GOPATH workspace, move the checkout into it, and
  # install a pinned Go toolchain.
  - script: |
      mkdir -p '$(GOBIN)'
      mkdir -p '$(GOPATH)/pkg'
      mkdir -p '$(modulePath)'
      shopt -s extglob
      shopt -s dotglob
      mv !(gopath) '$(modulePath)'
      echo '##vso[task.prependpath]$(GOBIN)'
      echo '##vso[task.prependpath]$(GOROOT)/bin'
      wget "https://golang.org/dl/go1.15.2.linux-amd64.tar.gz" --output-document "$(Agent.BuildDirectory)/go1.15.2.tar.gz"
      tar -C '$(Agent.BuildDirectory)' -xzf "$(Agent.BuildDirectory)/go1.15.2.tar.gz"
    displayName: 'Set up the Go workspace'
  # Client is built for a wide OS/arch matrix (plus softfloat MIPS builds
  # that gox cannot express); server only for common linux targets.
  - script: |
      go get github.com/mitchellh/gox
      v="$(git describe --tags)"
      output="{{.Dir}}-{{.OS}}-{{.Arch}}-$v"
      osarch="!darwin/arm !darwin/arm64 !darwin/386"
      echo "Compiling:"
      os="windows linux darwin"
      arch="amd64 386 arm arm64 mips mips64 mipsle mips64le"
      pushd cmd/ck-client || exit 1
      gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output"
      GOOS="linux" GOARCH="mips" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mips_softfloat-"${v}"
      GOOS="linux" GOARCH="mipsle" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mipsle_softfloat-"${v}"
      mv ck-client-* $(Build.ArtifactStagingDirectory)/
      os="linux"
      arch="amd64 386 arm arm64"
      pushd ../ck-server || exit 1
      gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output"
      mv ck-server-* $(Build.ArtifactStagingDirectory)/
    workingDirectory: '$(modulePath)'
    displayName: 'Get dependencies, then build'
  # Create a GitHub release for the tag; most optional inputs are left at
  # their defaults (assets, release notes, draft/prerelease flags).
  - task: GitHubRelease@0
    inputs:
      gitHubConnection: github.com_cbeuw
      repositoryName: '$(Build.Repository.Name)'
      action: 'create' # Options: create, edit, delete
      target: '$(Build.SourceVersion)' # Required when action == Create || Action == Edit
      tagSource: 'auto' # Required when action == Create# Options: auto, manual
      addChangeLog: false # Optional

View File

@ -1,18 +1,15 @@
//go:build go1.11
// +build go1.11 // +build go1.11
package main package main
import ( import (
"encoding/base64" "encoding/base64"
"encoding/binary"
"flag" "flag"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"net" "net"
"os" "os"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/client" "github.com/cbeuw/Cloak/internal/client"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -75,9 +72,6 @@ func main() {
log.Info("Starting standalone mode") log.Info("Starting standalone mode")
} }
log.SetFormatter(&log.TextFormatter{
FullTimestamp: true,
})
lvl, err := log.ParseLevel(*verbosity) lvl, err := log.ParseLevel(*verbosity)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@ -90,9 +84,7 @@ func main() {
} }
if ssPluginMode { if ssPluginMode {
if rawConfig.ProxyMethod == "" { rawConfig.ProxyMethod = "shadowsocks"
rawConfig.ProxyMethod = "shadowsocks"
}
// json takes precedence over environment variables // json takes precedence over environment variables
// i.e. if json field isn't empty, use that // i.e. if json field isn't empty, use that
if rawConfig.RemoteHost == "" { if rawConfig.RemoteHost == "" {
@ -139,7 +131,7 @@ func main() {
} }
} }
localConfig, remoteConfig, authInfo, err := rawConfig.ProcessRawConfig(common.RealWorldState) localConfig, remoteConfig, authInfo, err := rawConfig.SplitConfigs(common.RealWorldState)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -159,11 +151,10 @@ func main() {
if adminUID != nil { if adminUID != nil {
log.Infof("API base is %v", localConfig.LocalAddr) log.Infof("API base is %v", localConfig.LocalAddr)
authInfo.UID = adminUID authInfo.UID = adminUID
authInfo.SessionId = 0
remoteConfig.NumConn = 1 remoteConfig.NumConn = 1
seshMaker = func() *mux.Session { seshMaker = func() *mux.Session {
return client.MakeSession(remoteConfig, authInfo, d) return client.MakeSession(remoteConfig, authInfo, d, true)
} }
} else { } else {
var network string var network string
@ -174,33 +165,24 @@ func main() {
} }
log.Infof("Listening on %v %v for %v client", network, localConfig.LocalAddr, authInfo.ProxyMethod) log.Infof("Listening on %v %v for %v client", network, localConfig.LocalAddr, authInfo.ProxyMethod)
seshMaker = func() *mux.Session { seshMaker = func() *mux.Session {
authInfo := authInfo // copy the struct because we are overwriting SessionId return client.MakeSession(remoteConfig, authInfo, d, false)
randByte := make([]byte, 1)
common.RandRead(authInfo.WorldState.Rand, randByte)
authInfo.MockDomain = localConfig.MockDomainList[int(randByte[0])%len(localConfig.MockDomainList)]
// sessionID is usergenerated. There shouldn't be a security concern because the scope of
// sessionID is limited to its UID.
quad := make([]byte, 4)
common.RandRead(authInfo.WorldState.Rand, quad)
authInfo.SessionId = binary.BigEndian.Uint32(quad)
return client.MakeSession(remoteConfig, authInfo, d)
} }
} }
useSessionPerConnection := remoteConfig.NumConn == 0
if authInfo.Unordered { if authInfo.Unordered {
acceptor := func() (*net.UDPConn, error) { acceptor := func() (*net.UDPConn, error) {
udpAddr, _ := net.ResolveUDPAddr("udp", localConfig.LocalAddr) udpAddr, _ := net.ResolveUDPAddr("udp", localConfig.LocalAddr)
return net.ListenUDP("udp", udpAddr) return net.ListenUDP("udp", udpAddr)
} }
client.RouteUDP(acceptor, localConfig.Timeout, remoteConfig.Singleplex, seshMaker) client.RouteUDP(acceptor, localConfig.Timeout, seshMaker, useSessionPerConnection)
} else { } else {
listener, err := net.Listen("tcp", localConfig.LocalAddr) listener, err := net.Listen("tcp", localConfig.LocalAddr)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
client.RouteTCP(listener, localConfig.Timeout, remoteConfig.Singleplex, seshMaker) client.RouteTCP(listener, localConfig.Timeout, seshMaker, useSessionPerConnection)
} }
} }

View File

@ -1,4 +1,3 @@
//go:build !android
// +build !android // +build !android
package main package main

View File

@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build android
// +build android // +build android
package main package main
@ -29,10 +28,9 @@ import "C"
import ( import (
"bufio" "bufio"
log "github.com/sirupsen/logrus"
"os" "os"
"unsafe" "unsafe"
log "github.com/sirupsen/logrus"
) )
var ( var (

View File

@ -1,4 +1,3 @@
//go:build !android
// +build !android // +build !android
package main package main

View File

@ -1,6 +1,4 @@
//go:build android
// +build android // +build android
package main package main
// Stolen from https://github.com/shadowsocks/overture/blob/shadowsocks/core/utils/utils_android.go // Stolen from https://github.com/shadowsocks/overture/blob/shadowsocks/core/utils/utils_android.go
@ -15,60 +13,62 @@ package main
#include <sys/un.h> #include <sys/un.h>
#include <sys/uio.h> #include <sys/uio.h>
#define ANCIL_FD_BUFFER(n) \ #define ANCIL_FD_BUFFER(n) \
struct { \ struct { \
struct cmsghdr h; \ struct cmsghdr h; \
int fd[n]; \ int fd[n]; \
} }
int ancil_send_fds_with_buffer(int sock, const int *fds, unsigned n_fds, int
void *buffer) { ancil_send_fds_with_buffer(int sock, const int *fds, unsigned n_fds, void *buffer)
struct msghdr msghdr; {
char nothing = '!'; struct msghdr msghdr;
struct iovec nothing_ptr; char nothing = '!';
struct cmsghdr *cmsg; struct iovec nothing_ptr;
int i; struct cmsghdr *cmsg;
int i;
nothing_ptr.iov_base = &nothing; nothing_ptr.iov_base = &nothing;
nothing_ptr.iov_len = 1; nothing_ptr.iov_len = 1;
msghdr.msg_name = NULL; msghdr.msg_name = NULL;
msghdr.msg_namelen = 0; msghdr.msg_namelen = 0;
msghdr.msg_iov = &nothing_ptr; msghdr.msg_iov = &nothing_ptr;
msghdr.msg_iovlen = 1; msghdr.msg_iovlen = 1;
msghdr.msg_flags = 0; msghdr.msg_flags = 0;
msghdr.msg_control = buffer; msghdr.msg_control = buffer;
msghdr.msg_controllen = sizeof(struct cmsghdr) + sizeof(int) * n_fds; msghdr.msg_controllen = sizeof(struct cmsghdr) + sizeof(int) * n_fds;
cmsg = CMSG_FIRSTHDR(&msghdr); cmsg = CMSG_FIRSTHDR(&msghdr);
cmsg->cmsg_len = msghdr.msg_controllen; cmsg->cmsg_len = msghdr.msg_controllen;
cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_type = SCM_RIGHTS;
for (i = 0; i < n_fds; i++) for(i = 0; i < n_fds; i++)
((int *)CMSG_DATA(cmsg))[i] = fds[i]; ((int *)CMSG_DATA(cmsg))[i] = fds[i];
return (sendmsg(sock, &msghdr, 0) >= 0 ? 0 : -1); return(sendmsg(sock, &msghdr, 0) >= 0 ? 0 : -1);
} }
int ancil_send_fd(int sock, int fd) { int
ANCIL_FD_BUFFER(1) buffer; ancil_send_fd(int sock, int fd)
{
ANCIL_FD_BUFFER(1) buffer;
return (ancil_send_fds_with_buffer(sock, &fd, 1, &buffer)); return(ancil_send_fds_with_buffer(sock, &fd, 1, &buffer));
} }
void set_timeout(int sock) { void
struct timeval tv; set_timeout(int sock)
tv.tv_sec = 3; {
tv.tv_usec = 0; struct timeval tv;
setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, tv.tv_sec = 3;
sizeof(struct timeval)); tv.tv_usec = 0;
setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&tv, setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(struct timeval));
sizeof(struct timeval)); setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&tv, sizeof(struct timeval));
} }
*/ */
import "C" import "C"
import ( import (
"syscall"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"syscall"
) )
// In Android, once an app starts the VpnService, all outgoing traffic are routed by the system // In Android, once an app starts the VpnService, all outgoing traffic are routed by the system

View File

@ -3,21 +3,20 @@ package main
import ( import (
"flag" "flag"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server"
log "github.com/sirupsen/logrus"
"net" "net"
"net/http" "net/http"
_ "net/http/pprof" _ "net/http/pprof"
"os" "os"
"runtime" "runtime"
"strings" "strings"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server"
log "github.com/sirupsen/logrus"
) )
var version string var version string
func resolveBindAddr(bindAddrs []string) ([]net.Addr, error) { func parseBindAddr(bindAddrs []string) ([]net.Addr, error) {
var addrs []net.Addr var addrs []net.Addr
for _, addr := range bindAddrs { for _, addr := range bindAddrs {
bindAddr, err := net.ResolveTCPAddr("tcp", addr) bindAddr, err := net.ResolveTCPAddr("tcp", addr)
@ -29,54 +28,11 @@ func resolveBindAddr(bindAddrs []string) ([]net.Addr, error) {
return addrs, nil return addrs, nil
} }
// parse what shadowsocks server wants us to bind and harmonise it with what's already in bindAddr from
// our own config's BindAddr. This prevents duplicate bindings etc.
func parseSSBindAddr(ssRemoteHost string, ssRemotePort string, ckBindAddr *[]net.Addr) error {
var ssBind string
// When listening on an IPv6 and IPv4, SS gives REMOTE_HOST as e.g. ::|0.0.0.0
v4nv6 := len(strings.Split(ssRemoteHost, "|")) == 2
if v4nv6 {
ssBind = ":" + ssRemotePort
} else {
ssBind = net.JoinHostPort(ssRemoteHost, ssRemotePort)
}
ssBindAddr, err := net.ResolveTCPAddr("tcp", ssBind)
if err != nil {
return fmt.Errorf("unable to resolve bind address provided by SS: %v", err)
}
shouldAppend := true
for i, addr := range *ckBindAddr {
if addr.String() == ssBindAddr.String() {
shouldAppend = false
}
if addr.String() == ":"+ssRemotePort { // already listening on all interfaces
shouldAppend = false
}
if addr.String() == "0.0.0.0:"+ssRemotePort || addr.String() == "[::]:"+ssRemotePort {
// if config listens on one ip version but ss wants to listen on both,
// listen on both
if ssBindAddr.String() == ":"+ssRemotePort {
shouldAppend = true
(*ckBindAddr)[i] = ssBindAddr
}
}
}
if shouldAppend {
*ckBindAddr = append(*ckBindAddr, ssBindAddr)
}
return nil
}
func main() { func main() {
var config string var config string
var pluginMode bool var pluginMode bool
log.SetFormatter(&log.TextFormatter{
FullTimestamp: true,
})
if os.Getenv("SS_LOCAL_HOST") != "" && os.Getenv("SS_LOCAL_PORT") != "" { if os.Getenv("SS_LOCAL_HOST") != "" && os.Getenv("SS_LOCAL_PORT") != "" {
pluginMode = true pluginMode = true
config = os.Getenv("SS_PLUGIN_OPTIONS") config = os.Getenv("SS_PLUGIN_OPTIONS")
@ -85,11 +41,8 @@ func main() {
askVersion := flag.Bool("v", false, "Print the version number") askVersion := flag.Bool("v", false, "Print the version number")
printUsage := flag.Bool("h", false, "Print this message") printUsage := flag.Bool("h", false, "Print this message")
genUIDScript := flag.Bool("u", false, "Generate a UID to STDOUT") genUID := flag.Bool("u", false, "Generate a UID")
genKeyPairScript := flag.Bool("k", false, "Generate a pair of public and private key and output to STDOUT in the format of <public key>,<private key>") genKeyPair := flag.Bool("k", false, "Generate a pair of public and private key, output in the format of pubkey,pvkey")
genUIDHuman := flag.Bool("uid", false, "Generate and print out a UID")
genKeyPairHuman := flag.Bool("key", false, "Generate and print out a public-private key pair")
pprofAddr := flag.String("d", "", "debug use: ip:port to be listened by pprof profiler") pprofAddr := flag.String("d", "", "debug use: ip:port to be listened by pprof profiler")
verbosity := flag.String("verbosity", "info", "verbosity level") verbosity := flag.String("verbosity", "info", "verbosity level")
@ -104,23 +57,13 @@ func main() {
flag.Usage() flag.Usage()
return return
} }
if *genUIDScript || *genUIDHuman { if *genUID {
uid := generateUID() fmt.Println(generateUID())
if *genUIDScript {
fmt.Println(uid)
} else {
fmt.Printf("\x1B[35mYour UID is:\u001B[0m %s\n", uid)
}
return return
} }
if *genKeyPairScript || *genKeyPairHuman { if *genKeyPair {
pub, pv := generateKeyPair() pub, pv := generateKeyPair()
if *genKeyPairScript { fmt.Printf("%v,%v", pub, pv)
fmt.Printf("%v,%v\n", pub, pv)
} else {
fmt.Printf("\x1B[36mYour PUBLIC key is:\x1B[0m %65s\n", pub)
fmt.Printf("\x1B[33mYour PRIVATE key is (keep it secret):\x1B[0m %47s\n", pv)
}
return return
} }
@ -147,20 +90,17 @@ func main() {
log.Fatalf("Configuration file error: %v", err) log.Fatalf("Configuration file error: %v", err)
} }
bindAddr, err := resolveBindAddr(raw.BindAddr) bindAddr, err := parseBindAddr(raw.BindAddr)
if err != nil { if err != nil {
log.Fatalf("unable to parse BindAddr: %v", err) log.Fatalf("unable to parse BindAddr: %v", err)
} }
// in case the user hasn't specified any local address to bind to, we listen on 443 and 80
if !pluginMode && len(bindAddr) == 0 { if !pluginMode && len(bindAddr) == 0 {
https, _ := net.ResolveTCPAddr("tcp", ":443") https, _ := net.ResolveTCPAddr("tcp", ":443")
http, _ := net.ResolveTCPAddr("tcp", ":80") http, _ := net.ResolveTCPAddr("tcp", ":80")
bindAddr = []net.Addr{https, http} bindAddr = []net.Addr{https, http}
} }
// when cloak is started as a shadowsocks plugin, we parse the address ss-server // when cloak is started as a shadowsocks plugin
// is listening on into ProxyBook, and we parse the list of bindAddr
if pluginMode { if pluginMode {
ssLocalHost := os.Getenv("SS_LOCAL_HOST") ssLocalHost := os.Getenv("SS_LOCAL_HOST")
ssLocalPort := os.Getenv("SS_LOCAL_PORT") ssLocalPort := os.Getenv("SS_LOCAL_PORT")
@ -168,9 +108,38 @@ func main() {
ssRemoteHost := os.Getenv("SS_REMOTE_HOST") ssRemoteHost := os.Getenv("SS_REMOTE_HOST")
ssRemotePort := os.Getenv("SS_REMOTE_PORT") ssRemotePort := os.Getenv("SS_REMOTE_PORT")
err = parseSSBindAddr(ssRemoteHost, ssRemotePort, &bindAddr) var ssBind string
// When listening on an IPv6 and IPv4, SS gives REMOTE_HOST as e.g. ::|0.0.0.0
v4nv6 := len(strings.Split(ssRemoteHost, "|")) == 2
if v4nv6 {
ssBind = ":" + ssRemotePort
} else {
ssBind = net.JoinHostPort(ssRemoteHost, ssRemotePort)
}
ssBindAddr, err := net.ResolveTCPAddr("tcp", ssBind)
if err != nil { if err != nil {
log.Fatalf("failed to parse SS_REMOTE_HOST and SS_REMOTE_PORT: %v", err) log.Fatalf("unable to resolve bind address provided by SS: %v", err)
}
shouldAppend := true
for i, addr := range bindAddr {
if addr.String() == ssBindAddr.String() {
shouldAppend = false
}
if addr.String() == ":"+ssRemotePort { // already listening on all interfaces
shouldAppend = false
}
if addr.String() == "0.0.0.0:"+ssRemotePort || addr.String() == "[::]:"+ssRemotePort {
// if config listens on one ip version but ss wants to listen on both,
// listen on both
if ssBindAddr.String() == ":"+ssRemotePort {
shouldAppend = true
bindAddr[i] = ssBindAddr
}
}
}
if shouldAppend {
bindAddr = append(bindAddr, ssBindAddr)
} }
} }
@ -192,7 +161,6 @@ func main() {
if i != len(bindAddr)-1 { if i != len(bindAddr)-1 {
go listen(addr) go listen(addr)
} else { } else {
// we block the main goroutine here so it doesn't quit
listen(addr) listen(addr)
} }
} }

View File

@ -1,136 +1,52 @@
package main package main
import ( import "testing"
"net"
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseBindAddr(t *testing.T) { func TestParseBindAddr(t *testing.T) {
t.Run("port only", func(t *testing.T) { t.Run("port only", func(t *testing.T) {
addrs, err := resolveBindAddr([]string{":443"}) addrs, err := parseBindAddr([]string{":443"})
assert.NoError(t, err) if err != nil {
assert.Equal(t, ":443", addrs[0].String()) t.Error(err)
return
}
if addrs[0].String() != ":443" {
t.Errorf("expected %v got %v", ":443", addrs[0].String())
}
}) })
t.Run("specific address", func(t *testing.T) { t.Run("specific address", func(t *testing.T) {
addrs, err := resolveBindAddr([]string{"192.168.1.123:443"}) addrs, err := parseBindAddr([]string{"192.168.1.123:443"})
assert.NoError(t, err) if err != nil {
assert.Equal(t, "192.168.1.123:443", addrs[0].String()) t.Error(err)
return
}
if addrs[0].String() != "192.168.1.123:443" {
t.Errorf("expected %v got %v", "192.168.1.123:443", addrs[0].String())
}
}) })
t.Run("ipv6", func(t *testing.T) { t.Run("ipv6", func(t *testing.T) {
addrs, err := resolveBindAddr([]string{"[::]:443"}) addrs, err := parseBindAddr([]string{"[::]:443"})
assert.NoError(t, err) if err != nil {
assert.Equal(t, "[::]:443", addrs[0].String()) t.Error(err)
return
}
if addrs[0].String() != "[::]:443" {
t.Errorf("expected %v got %v", "[::]:443", addrs[0].String())
}
}) })
t.Run("mixed", func(t *testing.T) { t.Run("mixed", func(t *testing.T) {
addrs, err := resolveBindAddr([]string{":80", "[::]:443"}) addrs, err := parseBindAddr([]string{":80", "[::]:443"})
assert.NoError(t, err) if err != nil {
assert.Equal(t, ":80", addrs[0].String()) t.Error(err)
assert.Equal(t, "[::]:443", addrs[1].String()) return
}
if addrs[0].String() != ":80" {
t.Errorf("expected %v got %v", ":80", addrs[0].String())
}
if addrs[1].String() != "[::]:443" {
t.Errorf("expected %v got %v", "[::]:443", addrs[1].String())
}
}) })
} }
func assertSetEqual(t *testing.T, list1, list2 interface{}, msgAndArgs ...interface{}) (ok bool) {
return assert.Subset(t, list1, list2, msgAndArgs) && assert.Subset(t, list2, list1, msgAndArgs)
}
func TestParseSSBindAddr(t *testing.T) {
testTable := []struct {
name string
ssRemoteHost string
ssRemotePort string
ckBindAddr []net.Addr
expectedAddr []net.Addr
}{
{
"ss only ipv4",
"127.0.0.1",
"443",
[]net.Addr{},
[]net.Addr{
&net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 443,
},
},
},
{
"ss only ipv6",
"::",
"443",
[]net.Addr{},
[]net.Addr{
&net.TCPAddr{
IP: net.ParseIP("::"),
Port: 443,
},
},
},
//{
// "ss only ipv4 and v6",
// "::|127.0.0.1",
// "443",
// []net.Addr{},
// []net.Addr{
// &net.TCPAddr{
// IP: net.ParseIP("::"),
// Port: 443,
// },
// &net.TCPAddr{
// IP: net.ParseIP("127.0.0.1"),
// Port: 443,
// },
// },
//},
{
"ss and existing agrees",
"::",
"443",
[]net.Addr{
&net.TCPAddr{
IP: net.ParseIP("::"),
Port: 443,
},
},
[]net.Addr{
&net.TCPAddr{
IP: net.ParseIP("::"),
Port: 443,
},
},
},
{
"ss adds onto existing",
"127.0.0.1",
"80",
[]net.Addr{
&net.TCPAddr{
IP: net.ParseIP("::"),
Port: 443,
},
},
[]net.Addr{
&net.TCPAddr{
IP: net.ParseIP("::"),
Port: 443,
},
&net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 80,
},
},
},
}
for _, test := range testTable {
test := test
t.Run(test.name, func(t *testing.T) {
assert.NoError(t, parseSSBindAddr(test.ssRemoteHost, test.ssRemotePort, &test.ckBindAddr))
assertSetEqual(t, test.ckBindAddr, test.expectedAddr)
})
}
}

View File

@ -3,7 +3,6 @@ package main
import ( import (
"crypto/rand" "crypto/rand"
"encoding/base64" "encoding/base64"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
) )

View File

@ -1,4 +0,0 @@
coverage:
status:
project: off
patch: off

View File

@ -2,8 +2,8 @@
"Transport": "direct", "Transport": "direct",
"ProxyMethod": "shadowsocks", "ProxyMethod": "shadowsocks",
"EncryptionMethod": "plain", "EncryptionMethod": "plain",
"UID": "---Your UID here---", "UID": "5nneblJy6lniPJfr81LuYQ==",
"PublicKey": "---Public key here---", "PublicKey": "IYoUzkle/T/kriE+Ufdm7AHQtIeGnBWbhhlTbmDpUUI=",
"ServerName": "www.bing.com", "ServerName": "www.bing.com",
"NumConn": 4, "NumConn": 4,
"BrowserSig": "chrome", "BrowserSig": "chrome",

View File

@ -18,10 +18,11 @@
":80" ":80"
], ],
"BypassUID": [ "BypassUID": [
"---Bypass UID here---" "1rmq6Ag1jZJCImLBIL5wzQ=="
], ],
"RedirAddr": "cloudflare.com", "RedirAddr": "204.79.197.200:443",
"PrivateKey": "---Private key here---", "PrivateKey": "EN5aPEpNBO+vw+BtFQY2OnK9bQU7rvEj5qmnmgwEtUc=",
"AdminUID": "---Admin UID here (optional)---", "AdminUID": "5nneblJy6lniPJfr81LuYQ==",
"DatabasePath": "userinfo.db" "DatabasePath": "userinfo.db",
"StreamTimeout": 300
} }

40
go.mod
View File

@ -1,30 +1,20 @@
module github.com/cbeuw/Cloak module github.com/cbeuw/Cloak
go 1.24.0 go 1.13
toolchain go1.24.2
require ( require (
github.com/cbeuw/connutil v0.0.0-20200411215123-966bfaa51ee3 github.com/cbeuw/connutil v0.0.0-20200411160121-c5a5c4a9de14
github.com/gorilla/mux v1.8.1 github.com/gorilla/mux v1.7.3
github.com/gorilla/websocket v1.5.3 github.com/gorilla/websocket v1.4.1
github.com/juju/ratelimit v1.0.2 github.com/juju/ratelimit v1.0.1
github.com/refraction-networking/utls v1.8.0 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/sirupsen/logrus v1.9.3 github.com/kr/pretty v0.1.0 // indirect
github.com/stretchr/testify v1.10.0 github.com/mitchellh/gox v1.0.1 // indirect
go.etcd.io/bbolt v1.4.0 github.com/refraction-networking/utls v0.0.0-20190909200633-43c36d3c1f57
golang.org/x/crypto v0.37.0 github.com/sirupsen/logrus v1.5.0
) github.com/stretchr/testify v1.2.2
go.etcd.io/bbolt v1.3.4
require ( golang.org/x/crypto v0.0.0-20200414173820-0848c9571904
github.com/andybalholm/brotli v1.1.1 // indirect golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
github.com/cloudflare/circl v1.6.1 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
golang.org/x/sys v0.32.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
) )

99
go.sum
View File

@ -1,61 +1,50 @@
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/cbeuw/connutil v0.0.0-20200411160121-c5a5c4a9de14 h1:bWJKlzTJR7C9DX0l1qhkTaP1lTEBWVDKhg8C/tNJqKg=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/cbeuw/connutil v0.0.0-20200411160121-c5a5c4a9de14/go.mod h1:6jR2SzckGv8hIIS9zWJ160mzGVVOYp4AXZMDtacL6LE=
github.com/cbeuw/connutil v0.0.0-20200411215123-966bfaa51ee3 h1:LRxW8pdmWmyhoNh+TxUjxsAinGtCsVGjsl3xg6zoRSs=
github.com/cbeuw/connutil v0.0.0-20200411215123-966bfaa51ee3/go.mod h1:6jR2SzckGv8hIIS9zWJ160mzGVVOYp4AXZMDtacL6LE=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8=
github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/refraction-networking/utls v1.6.6 h1:igFsYBUJPYM8Rno9xUuDoM5GQrVEqY4llzEXOkL43Ig= github.com/refraction-networking/utls v0.0.0-20190909200633-43c36d3c1f57 h1:SL1K0QAuC1b54KoY1pjPWe6kSlsFHwK9/oC960fKrTY=
github.com/refraction-networking/utls v1.6.6/go.mod h1:BC3O4vQzye5hqpmDTWUqi4P5DDhzJfkV1tdqtawQIH0= github.com/refraction-networking/utls v0.0.0-20190909200633-43c36d3c1f57/go.mod h1:tz9gX959MEFfFN5whTIocCLUG57WiILqtdVxI8c6Wj0=
github.com/refraction-networking/utls v1.7.0/go.mod h1:lV0Gwc1/Fi+HYH8hOtgFRdHfKo4FKSn6+FdyOz9hRms= github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/refraction-networking/utls v1.7.3 h1:L0WRhHY7Oq1T0zkdzVZMR6zWZv+sXbHB9zcuvsAEqCo= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/refraction-networking/utls v1.7.3/go.mod h1:TUhh27RHMGtQvjQq+RyO11P6ZNQNBb3N0v7wsEjKAIQ= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/refraction-networking/utls v1.8.0 h1:L38krhiTAyj9EeiQQa2sg+hYb4qwLCqdMcpZrRfbONE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/refraction-networking/utls v1.8.0/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,11 +1,10 @@
package client package client
import ( import (
"encoding/binary"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
utls "github.com/refraction-networking/utls"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"net" "net"
"strings"
) )
const appDataMaxLength = 16401 const appDataMaxLength = 16401
@ -14,131 +13,65 @@ type clientHelloFields struct {
random []byte random []byte
sessionId []byte sessionId []byte
x25519KeyShare []byte x25519KeyShare []byte
serverName string sni []byte
} }
type browser int type browser interface {
composeClientHello(clientHelloFields) []byte
}
const ( func makeServerName(serverName string) []byte {
chrome = iota serverNameListLength := make([]byte, 2)
firefox binary.BigEndian.PutUint16(serverNameListLength, uint16(len(serverName)+3))
safari serverNameType := []byte{0x00} // host_name
) serverNameLength := make([]byte, 2)
binary.BigEndian.PutUint16(serverNameLength, uint16(len(serverName)))
ret := make([]byte, 2+1+2+len(serverName))
copy(ret[0:2], serverNameListLength)
copy(ret[2:3], serverNameType)
copy(ret[3:5], serverNameLength)
copy(ret[5:], serverName)
return ret
}
// addExtensionRecord, add type, length to extension data
func addExtRec(typ []byte, data []byte) []byte {
length := make([]byte, 2)
binary.BigEndian.PutUint16(length, uint16(len(data)))
ret := make([]byte, 2+2+len(data))
copy(ret[0:2], typ)
copy(ret[2:4], length)
copy(ret[4:], data)
return ret
}
func genStegClientHello(ai authenticationPayload, serverName string) (ret clientHelloFields) {
// random is marshalled ephemeral pub key 32 bytes
// The authentication ciphertext and its tag are then distributed among SessionId and X25519KeyShare
ret.random = ai.randPubKey[:]
ret.sessionId = ai.ciphertextWithTag[0:32]
ret.x25519KeyShare = ai.ciphertextWithTag[32:64]
ret.sni = makeServerName(serverName)
return
}
type DirectTLS struct { type DirectTLS struct {
*common.TLSConn *common.TLSConn
browser browser browser browser
} }
var topLevelDomains = []string{"com", "net", "org", "it", "fr", "me", "ru", "cn", "es", "tr", "top", "xyz", "info"} // NewClientTransport handles the TLS handshake for a given conn and returns the sessionKey
func randomServerName() string {
/*
Copyright: Proton AG
https://github.com/ProtonVPN/wireguard-go/commit/bcf344b39b213c1f32147851af0d2a8da9266883
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
charNum := int('z') - int('a') + 1
size := 3 + common.RandInt(10)
name := make([]byte, size)
for i := range name {
name[i] = byte(int('a') + common.RandInt(charNum))
}
return string(name) + "." + common.RandItem(topLevelDomains)
}
func buildClientHello(browser browser, fields clientHelloFields) ([]byte, error) {
// We don't use utls to handle connections (as it'll attempt a real TLS negotiation)
// We only want it to build the ClientHello locally
fakeConn := net.TCPConn{}
var helloID utls.ClientHelloID
switch browser {
case chrome:
helloID = utls.HelloChrome_Auto
case firefox:
helloID = utls.HelloFirefox_Auto
case safari:
helloID = utls.HelloSafari_Auto
}
uclient := utls.UClient(&fakeConn, &utls.Config{ServerName: fields.serverName}, helloID)
if err := uclient.BuildHandshakeState(); err != nil {
return []byte{}, err
}
if err := uclient.SetClientRandom(fields.random); err != nil {
return []byte{}, err
}
uclient.HandshakeState.Hello.SessionId = make([]byte, 32)
copy(uclient.HandshakeState.Hello.SessionId, fields.sessionId)
// Find the X25519 key share and overwrite it
var extIndex int
var keyShareIndex int
for i, ext := range uclient.Extensions {
ext, ok := ext.(*utls.KeyShareExtension)
if ok {
extIndex = i
for j, keyShare := range ext.KeyShares {
if keyShare.Group == utls.X25519 {
keyShareIndex = j
}
}
}
}
copy(uclient.Extensions[extIndex].(*utls.KeyShareExtension).KeyShares[keyShareIndex].Data, fields.x25519KeyShare)
if err := uclient.BuildHandshakeState(); err != nil {
return []byte{}, err
}
return uclient.HandshakeState.Hello.Raw, nil
}
// Handshake handles the TLS handshake for a given conn and returns the sessionKey
// if the server proceed with Cloak authentication // if the server proceed with Cloak authentication
func (tls *DirectTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) { func (tls *DirectTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) {
payload, sharedSecret := makeAuthenticationPayload(authInfo) payload, sharedSecret := makeAuthenticationPayload(authInfo)
chOnly := tls.browser.composeClientHello(genStegClientHello(payload, authInfo.MockDomain))
fields := clientHelloFields{ chWithRecordLayer := common.AddRecordLayer(chOnly, common.Handshake, common.VersionTLS11)
random: payload.randPubKey[:],
sessionId: payload.ciphertextWithTag[0:32],
x25519KeyShare: payload.ciphertextWithTag[32:64],
serverName: authInfo.MockDomain,
}
if strings.EqualFold(fields.serverName, "random") {
fields.serverName = randomServerName()
}
var ch []byte
ch, err = buildClientHello(tls.browser, fields)
if err != nil {
return
}
chWithRecordLayer := common.AddRecordLayer(ch, common.Handshake, common.VersionTLS11)
_, err = rawConn.Write(chWithRecordLayer) _, err = rawConn.Write(chWithRecordLayer)
if err != nil { if err != nil {
return return
} }
log.Trace("client hello sent successfully") log.Trace("client hello sent successfully")
tls.TLSConn = common.NewTLSConn(rawConn) tls.TLSConn = &common.TLSConn{Conn: rawConn}
buf := make([]byte, 1024) buf := make([]byte, 1024)
log.Trace("waiting for ServerHello") log.Trace("waiting for ServerHello")

View File

@ -0,0 +1,43 @@
package client
import (
"bytes"
"encoding/hex"
"testing"
)
func htob(s string) []byte {
b, _ := hex.DecodeString(s)
return b
}
// TestMakeServerName checks the encoded server_name extension body produced
// by makeServerName against captured byte sequences for known hostnames.
func TestMakeServerName(t *testing.T) {
	cases := []struct {
		serverName string
		target     []byte
	}{
		{
			serverName: "www.google.com",
			target:     htob("001100000e7777772e676f6f676c652e636f6d"),
		},
		{
			serverName: "www.gstatic.com",
			target:     htob("001200000f7777772e677374617469632e636f6d"),
		},
		{
			serverName: "googleads.g.doubleclick.net",
			target:     htob("001e00001b676f6f676c656164732e672e646f75626c65636c69636b2e6e6574"),
		},
	}

	for _, c := range cases {
		got := makeServerName(c.serverName)
		if !bytes.Equal(got, c.target) {
			t.Error(
				"for", c.serverName,
				"expecting", c.target,
				"got", got)
		}
	}
}

View File

@ -2,10 +2,8 @@ package client
import ( import (
"encoding/binary" "encoding/binary"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
log "github.com/sirupsen/logrus"
) )
const ( const (
@ -28,28 +26,21 @@ func makeAuthenticationPayload(authInfo AuthInfo) (ret authenticationPayload, sh
| 16 bytes | 12 bytes | 1 byte | 8 bytes | 4 bytes | 1 byte | 6 bytes | | 16 bytes | 12 bytes | 1 byte | 8 bytes | 4 bytes | 1 byte | 6 bytes |
+----------+----------------+---------------------+-------------+--------------+--------+------------+ +----------+----------------+---------------------+-------------+--------------+--------+------------+
*/ */
ephPv, ephPub, err := ecdh.GenerateKey(authInfo.WorldState.Rand) ephPv, ephPub, _ := ecdh.GenerateKey(authInfo.WorldState.Rand)
if err != nil {
log.Panicf("failed to generate ephemeral key pair: %v", err)
}
copy(ret.randPubKey[:], ecdh.Marshal(ephPub)) copy(ret.randPubKey[:], ecdh.Marshal(ephPub))
plaintext := make([]byte, 48) plaintext := make([]byte, 48)
copy(plaintext, authInfo.UID) copy(plaintext, authInfo.UID)
copy(plaintext[16:28], authInfo.ProxyMethod) copy(plaintext[16:28], authInfo.ProxyMethod)
plaintext[28] = authInfo.EncryptionMethod plaintext[28] = authInfo.EncryptionMethod
binary.BigEndian.PutUint64(plaintext[29:37], uint64(authInfo.WorldState.Now().UTC().Unix())) binary.BigEndian.PutUint64(plaintext[29:37], uint64(authInfo.WorldState.Now().Unix()))
binary.BigEndian.PutUint32(plaintext[37:41], authInfo.SessionId) binary.BigEndian.PutUint32(plaintext[37:41], authInfo.SessionId)
if authInfo.Unordered { if authInfo.Unordered {
plaintext[41] |= UNORDERED_FLAG plaintext[41] |= UNORDERED_FLAG
} }
secret, err := ecdh.GenerateSharedSecret(ephPv, authInfo.ServerPubKey) copy(sharedSecret[:], ecdh.GenerateSharedSecret(ephPv, authInfo.ServerPubKey))
if err != nil {
log.Panicf("error in generating shared secret: %v", err)
}
copy(sharedSecret[:], secret)
ciphertextWithTag, _ := common.AESGCMEncrypt(ret.randPubKey[:12], sharedSecret[:], plaintext) ciphertextWithTag, _ := common.AESGCMEncrypt(ret.randPubKey[:12], sharedSecret[:], plaintext)
copy(ret.ciphertextWithTag[:], ciphertextWithTag[:]) copy(ret.ciphertextWithTag[:], ciphertextWithTag[:])
return return

View File

@ -2,12 +2,10 @@ package client
import ( import (
"bytes" "bytes"
"testing"
"time"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/multiplex" "github.com/cbeuw/Cloak/internal/multiplex"
"github.com/stretchr/testify/assert" "testing"
"time"
) )
func TestMakeAuthenticationPayload(t *testing.T) { func TestMakeAuthenticationPayload(t *testing.T) {
@ -29,7 +27,7 @@ func TestMakeAuthenticationPayload(t *testing.T) {
0x01, 0xd0, 0xb4, 0x87, 0x86, 0x9c, 0x15, 0x9b, 0x01, 0xd0, 0xb4, 0x87, 0x86, 0x9c, 0x15, 0x9b,
0x86, 0x19, 0x53, 0x6e, 0x60, 0xe9, 0x51, 0x42}, 0x86, 0x19, 0x53, 0x6e, 0x60, 0xe9, 0x51, 0x42},
ProxyMethod: "shadowsocks", ProxyMethod: "shadowsocks",
EncryptionMethod: multiplex.EncryptionMethodPlain, EncryptionMethod: multiplex.E_METHOD_PLAIN,
MockDomain: "d2jkinvisak5y9.cloudfront.net", MockDomain: "d2jkinvisak5y9.cloudfront.net",
WorldState: common.WorldState{ WorldState: common.WorldState{
Rand: bytes.NewBuffer([]byte{ Rand: bytes.NewBuffer([]byte{
@ -66,8 +64,12 @@ func TestMakeAuthenticationPayload(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
func() { func() {
payload, sharedSecret := makeAuthenticationPayload(tc.authInfo) payload, sharedSecret := makeAuthenticationPayload(tc.authInfo)
assert.Equal(t, tc.expPayload, payload, "payload doesn't match") if payload != tc.expPayload {
assert.Equal(t, tc.expSecret, sharedSecret, "shared secret doesn't match") t.Errorf("payload doesn't match:\nexp %v\ngot %v", tc.expPayload, payload)
}
if sharedSecret != tc.expSecret {
t.Errorf("secret doesn't match:\nexp %x\ngot %x", tc.expPayload, payload)
}
}() }()
} }
} }

103
internal/client/chrome.go Normal file
View File

@ -0,0 +1,103 @@
// Fingerprint of Chrome 85
package client
import (
"encoding/binary"
"encoding/hex"
"github.com/cbeuw/Cloak/internal/common"
)
// Chrome composes ClientHello messages mimicking Chrome 85's TLS fingerprint.
type Chrome struct{}

// makeGREASE returns a two-byte GREASE value: two identical bytes of the
// form 0xNA, where N is a random nibble.
// see https://tools.ietf.org/html/draft-davidben-tls-grease-01
// This is exclusive to Chrome.
func makeGREASE() []byte {
	var randByte [1]byte
	common.CryptoRandRead(randByte[:])
	g := (randByte[0] % 16) * 16 + 0xA
	return []byte{g, g}
}
// composeExtensions serialises the TLS extensions block of a Chrome 85
// ClientHello. sni is a pre-encoded server_name extension body (as produced
// by makeServerName); keyShare is the 32-byte hidden payload embedded as the
// x25519 key share. The returned bytes are the concatenated extension
// records without the 2-byte total-length prefix (the caller computes that).
func (c *Chrome) composeExtensions(sni []byte, keyShare []byte) []byte {
	// supported_groups body: list length 8, then a GREASE group followed by
	// x25519, secp256r1 and secp384r1.
	makeSupportedGroups := func() []byte {
		suppGroupListLen := []byte{0x00, 0x08}
		ret := make([]byte, 2+8)
		copy(ret[0:2], suppGroupListLen)
		copy(ret[2:4], makeGREASE())
		copy(ret[4:], []byte{0x00, 0x1d, 0x00, 0x17, 0x00, 0x18})
		return ret
	}

	// key_share body: a GREASE entry with a 1-byte payload, then an x25519
	// entry whose 32-byte payload carries the hidden data.
	makeKeyShare := func(hidden []byte) []byte {
		ret := make([]byte, 43)
		ret[0], ret[1] = 0x00, 0x29 // length 41
		copy(ret[2:4], makeGREASE())
		ret[4], ret[5] = 0x00, 0x01 // length 1
		ret[6] = 0x00
		ret[7], ret[8] = 0x00, 0x1d // group x25519
		ret[9], ret[10] = 0x00, 0x20 // length 32
		copy(ret[11:43], hidden)
		return ret
	}

	// extension length is always 403, and server name length is variable;
	// the final padding extension absorbs the variable SNI length.
	var ext [17][]byte
	ext[0] = addExtRec(makeGREASE(), nil)                                        // First GREASE
	ext[1] = addExtRec([]byte{0x00, 0x00}, sni)                                  // server name indication
	ext[2] = addExtRec([]byte{0x00, 0x17}, nil)                                  // extended_master_secret
	ext[3] = addExtRec([]byte{0xff, 0x01}, []byte{0x00})                         // renegotiation_info
	ext[4] = addExtRec([]byte{0x00, 0x0a}, makeSupportedGroups())                // supported groups
	ext[5] = addExtRec([]byte{0x00, 0x0b}, []byte{0x01, 0x00})                   // ec point formats
	ext[6] = addExtRec([]byte{0x00, 0x23}, nil)                                  // Session tickets
	APLN, _ := hex.DecodeString("000c02683208687474702f312e31")                  // h2 and http/1.1
	ext[7] = addExtRec([]byte{0x00, 0x10}, APLN)                                 // app layer proto negotiation
	ext[8] = addExtRec([]byte{0x00, 0x05}, []byte{0x01, 0x00, 0x00, 0x00, 0x00}) // status request
	sigAlgo, _ := hex.DecodeString("001004030804040105030805050108060601")
	ext[9] = addExtRec([]byte{0x00, 0x0d}, sigAlgo)                   // Signature Algorithms
	ext[10] = addExtRec([]byte{0x00, 0x12}, nil)                      // signed cert timestamp
	ext[11] = addExtRec([]byte{0x00, 0x33}, makeKeyShare(keyShare))   // key share
	ext[12] = addExtRec([]byte{0x00, 0x2d}, []byte{0x01, 0x01})       // psk key exchange modes
	suppVersions, _ := hex.DecodeString("0a9A9A0304030303020301")     // 9A9A needs to be a GREASE
	copy(suppVersions[1:3], makeGREASE())
	ext[13] = addExtRec([]byte{0x00, 0x2b}, suppVersions)             // supported versions
	ext[14] = addExtRec([]byte{0x00, 0x1b}, []byte{0x02, 0x00, 0x02}) // compress certificate
	ext[15] = addExtRec(makeGREASE(), []byte{0x00})                   // Last GREASE
	// Padding sized so the total stays at 403 regardless of SNI length:
	// len(ext[1]) + 170 + len(ext[16]) = 403
	// len(ext[16]) = 233 - len(ext[1])
	// 2+2+len(padding) = 233 - len(ext[1])
	// len(padding) = 229 - len(ext[1])
	ext[16] = addExtRec([]byte{0x00, 0x15}, make([]byte, 229-len(ext[1]))) // padding

	var ret []byte
	for _, e := range ext {
		ret = append(ret, e...)
	}
	return ret
}
// composeClientHello serialises a TLS ClientHello body (without the record
// layer) carrying the Chrome 85 fingerprint, embedding the steganographic
// fields from hd as the random, session id and x25519 key share.
func (c *Chrome) composeClientHello(hd clientHelloFields) (ch []byte) {
	suites, _ := hex.DecodeString("130113021303c02bc02fc02cc030cca9cca8c013c014009c009d002f0035")
	suitesWithGREASE := append(makeGREASE(), suites...)

	extensions := c.composeExtensions(hd.sni, hd.x25519KeyShare)
	extensionsLength := []byte{0x00, 0x00}
	binary.BigEndian.PutUint16(extensionsLength, uint16(len(extensions)))

	segments := [][]byte{
		{0x01},             // handshake type
		{0x00, 0x01, 0xfc}, // length 508
		{0x03, 0x03},       // client version
		hd.random,          // random
		{0x20},             // session id length 32
		hd.sessionId,       // session id
		{0x00, 0x20},       // cipher suites length 34
		suitesWithGREASE,   // cipher suites
		{0x01},             // compression methods length 1
		{0x00},             // compression methods
		extensionsLength,   // extensions length
		extensions,         // extensions
	}
	for _, seg := range segments {
		ch = append(ch, seg...)
	}
	return ch
}

View File

@ -0,0 +1,48 @@
package client
import (
"encoding/hex"
"testing"
)
// TestMakeGREASE checks the GREASE wire format (each byte's low nibble is
// 0xA, i.e. hex digits 1 and 3 are 'a') and that repeated draws are not
// overwhelmingly identical to the first one.
func TestMakeGREASE(t *testing.T) {
	first := hex.EncodeToString(makeGREASE())
	if first[1] != 'a' || first[3] != 'a' {
		t.Errorf("GREASE got %v", first)
	}

	samples := make([]string, 0, 50)
	repeats := 0
	for i := 0; i < 50; i++ {
		s := hex.EncodeToString(makeGREASE())
		samples = append(samples, s)
		if s == first {
			repeats++
		}
	}
	if repeats > 40 {
		t.Error("GREASE is not random", samples)
	}
}
// TestComposeExtension compares Chrome's composed extensions against a
// captured byte sequence. GREASE values are random per call, so mismatching
// bytes are tolerated when both sides look like a GREASE pair: low nibble
// 0xA on both sides, with an identical adjacent byte (GREASE is always two
// repeated bytes).
// NOTE(review): the loop indexes target[p] for p up to len(result)-1; if
// composeExtensions ever produced more bytes than the fixture, this would
// panic rather than fail — assumes the lengths match.
func TestComposeExtension(t *testing.T) {
	serverName := "github.com"
	keyShare, _ := hex.DecodeString("690f074f5c01756982269b66d58c90c47dc0f281d654c7b2c16f63c9033f5604")
	sni := makeServerName(serverName)
	result := (&Chrome{}).composeExtensions(sni, keyShare)
	target, _ := hex.DecodeString("8a8a00000000000f000d00000a6769746875622e636f6d00170000ff01000100000a000a00088a8a001d00170018000b00020100002300000010000e000c02683208687474702f312e31000500050100000000000d0012001004030804040105030805050108060601001200000033002b00298a8a000100001d0020690f074f5c01756982269b66d58c90c47dc0f281d654c7b2c16f63c9033f5604002d00020101002b000b0a3a3a0304030303020301001b00030200024a4a000100001500d2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	for p := 0; p < len(result); p++ {
		if result[p] != target[p] {
			// Tolerate GREASE randomness: both bytes GREASE-shaped and part
			// of a repeated pair on at least one side of position p.
			if result[p]&0x0F == 0xA && target[p]&0x0F == 0xA &&
				((p > 0 && result[p-1] == result[p] && target[p-1] == target[p]) ||
					(p < len(result)-1 && result[p+1] == result[p] && target[p+1] == target[p])) {
				continue
			}
			t.Errorf("inequality at %v", p)
		}
	}
}

View File

@ -1,30 +1,43 @@
package client package client
import ( import (
"encoding/binary"
"github.com/cbeuw/Cloak/internal/common"
"net" "net"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
// On different invocations to MakeSession, authInfo.SessionId MUST be different func MakeSession(connConfig RemoteConnConfig, authInfo AuthInfo, dialer common.Dialer, isAdmin bool) *mux.Session {
func MakeSession(connConfig RemoteConnConfig, authInfo AuthInfo, dialer common.Dialer) *mux.Session {
log.Info("Attempting to start a new session") log.Info("Attempting to start a new session")
//TODO: let caller set this
if !isAdmin {
// sessionID is usergenerated. There shouldn't be a security concern because the scope of
// sessionID is limited to its UID.
quad := make([]byte, 4)
common.RandRead(authInfo.WorldState.Rand, quad)
authInfo.SessionId = binary.BigEndian.Uint32(quad)
} else {
authInfo.SessionId = 0
}
connsCh := make(chan net.Conn, connConfig.NumConn) numConn := connConfig.NumConn
if numConn <= 0 {
log.Infof("Using session per connection (no multiplexing)")
numConn = 1
}
connsCh := make(chan net.Conn, numConn)
var _sessionKey atomic.Value var _sessionKey atomic.Value
var wg sync.WaitGroup var wg sync.WaitGroup
for i := 0; i < connConfig.NumConn; i++ { for i := 0; i < numConn; i++ {
wg.Add(1) wg.Add(1)
transportConfig := connConfig.Transport
go func() { go func() {
makeconn: makeconn:
transportConn := transportConfig.CreateTransport()
remoteConn, err := dialer.Dial("tcp", connConfig.RemoteAddr) remoteConn, err := dialer.Dial("tcp", connConfig.RemoteAddr)
if err != nil { if err != nil {
log.Errorf("Failed to establish new connections to remote: %v", err) log.Errorf("Failed to establish new connections to remote: %v", err)
@ -33,23 +46,14 @@ func MakeSession(connConfig RemoteConnConfig, authInfo AuthInfo, dialer common.D
goto makeconn goto makeconn
} }
transportConn := connConfig.TransportMaker()
sk, err := transportConn.Handshake(remoteConn, authInfo) sk, err := transportConn.Handshake(remoteConn, authInfo)
if err != nil { if err != nil {
log.Errorf("Failed to prepare connection to remote: %v", err)
transportConn.Close() transportConn.Close()
log.Errorf("Failed to prepare connection to remote: %v", err)
// In Cloak v2.11.0, we've updated uTLS version and subsequently increased the first packet size for chrome above 1500
// https://github.com/cbeuw/Cloak/pull/306#issuecomment-2862728738. As a backwards compatibility feature, if we fail
// to connect using chrome signature, retry with firefox which has a smaller packet size.
if transportConfig.mode == "direct" && transportConfig.browser == chrome {
transportConfig.browser = firefox
log.Warnf("failed to connect with chrome signature, falling back to retry with firefox")
}
time.Sleep(time.Second * 3) time.Sleep(time.Second * 3)
goto makeconn goto makeconn
} }
// sessionKey given by each connection should be identical
_sessionKey.Store(sk) _sessionKey.Store(sk)
connsCh <- transportConn connsCh <- transportConn
wg.Done() wg.Done()
@ -65,15 +69,14 @@ func MakeSession(connConfig RemoteConnConfig, authInfo AuthInfo, dialer common.D
} }
seshConfig := mux.SessionConfig{ seshConfig := mux.SessionConfig{
Singleplex: connConfig.Singleplex, Obfuscator: obfuscator,
Obfuscator: obfuscator, Valve: nil,
Valve: nil, Unordered: authInfo.Unordered,
Unordered: authInfo.Unordered, MaxFrameSize: appDataMaxLength,
MsgOnWireSizeLimit: appDataMaxLength,
} }
sesh := mux.MakeSession(authInfo.SessionId, seshConfig) sesh := mux.MakeSession(authInfo.SessionId, seshConfig)
for i := 0; i < connConfig.NumConn; i++ { for i := 0; i < numConn; i++ {
conn := <-connsCh conn := <-connsCh
sesh.AddConnection(conn) sesh.AddConnection(conn)
} }

View File

@ -0,0 +1,77 @@
// Fingerprint of Firefox 68
package client
import (
"encoding/binary"
"encoding/hex"
"github.com/cbeuw/Cloak/internal/common"
)
// Firefox composes ClientHello messages mimicking Firefox 68's TLS fingerprint.
type Firefox struct{}

// composeExtensions serialises the TLS extensions block of a Firefox 68
// ClientHello. SNI is a pre-encoded server_name extension body (as produced
// by makeServerName); keyShare is the 32-byte hidden payload embedded as the
// x25519 key share. A random 65-byte secp256r1 key share is generated
// alongside it. The result omits the 2-byte total-length prefix (the caller
// computes that).
func (f *Firefox) composeExtensions(SNI []byte, keyShare []byte) []byte {
	// key_share body: an x25519 entry carrying the hidden data, then a
	// secp256r1 entry filled with random bytes.
	composeKeyShare := func(hidden []byte) []byte {
		ret := make([]byte, 107)
		ret[0], ret[1] = 0x00, 0x69 // length 105
		ret[2], ret[3] = 0x00, 0x1d // group x25519
		ret[4], ret[5] = 0x00, 0x20 // length 32
		copy(ret[6:38], hidden)
		ret[38], ret[39] = 0x00, 0x17 // group secp256r1
		ret[40], ret[41] = 0x00, 0x41 // length 65
		common.CryptoRandRead(ret[42:107])
		return ret
	}

	// extension length is always 399, and server name length is variable;
	// the final padding extension absorbs the variable SNI length.
	var ext [14][]byte
	ext[0] = addExtRec([]byte{0x00, 0x00}, SNI)          // server name indication
	ext[1] = addExtRec([]byte{0x00, 0x17}, nil)          // extended_master_secret
	ext[2] = addExtRec([]byte{0xff, 0x01}, []byte{0x00}) // renegotiation_info
	suppGroup, _ := hex.DecodeString("000c001d00170018001901000101")
	ext[3] = addExtRec([]byte{0x00, 0x0a}, suppGroup)           // supported groups
	ext[4] = addExtRec([]byte{0x00, 0x0b}, []byte{0x01, 0x00})  // ec point formats
	ext[5] = addExtRec([]byte{0x00, 0x23}, []byte{})            // Session tickets
	APLN, _ := hex.DecodeString("000c02683208687474702f312e31") // h2 and http/1.1
	ext[6] = addExtRec([]byte{0x00, 0x10}, APLN)                // app layer proto negotiation
	ext[7] = addExtRec([]byte{0x00, 0x05}, []byte{0x01, 0x00, 0x00, 0x00, 0x00}) // status request
	ext[8] = addExtRec([]byte{0x00, 0x33}, composeKeyShare(keyShare))            // key share
	suppVersions, _ := hex.DecodeString("080304030303020301")
	ext[9] = addExtRec([]byte{0x00, 0x2b}, suppVersions) // supported versions
	sigAlgo, _ := hex.DecodeString("001604030503060308040805080604010501060102030201")
	ext[10] = addExtRec([]byte{0x00, 0x0d}, sigAlgo)             // Signature Algorithms
	ext[11] = addExtRec([]byte{0x00, 0x2d}, []byte{0x01, 0x01})  // psk key exchange modes
	ext[12] = addExtRec([]byte{0x00, 0x1c}, []byte{0x40, 0x01})  // record size limit
	// Padding content is 163-len(SNI), i.e. 167-len(ext[0]) since ext[0]
	// adds a 4-byte type+length header. The captured test vector
	// (TestComposeExtensions, padding length 0x8c for an 18-char name)
	// confirms this arithmetic; an earlier comment stating
	// "len(padding) = 158 - len(ext[0])" was stale.
	ext[13] = addExtRec([]byte{0x00, 0x15}, make([]byte, 163-len(SNI))) // padding

	var ret []byte
	for _, e := range ext {
		ret = append(ret, e...)
	}
	return ret
}
// composeClientHello serialises a TLS ClientHello body (without the record
// layer) carrying the Firefox 68 fingerprint, embedding the steganographic
// fields from hd as the random, session id and x25519 key share.
func (f *Firefox) composeClientHello(hd clientHelloFields) (ch []byte) {
	suites, _ := hex.DecodeString("130113031302c02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a")

	extensions := f.composeExtensions(hd.sni, hd.x25519KeyShare)
	extensionsLength := []byte{0x00, 0x00}
	binary.BigEndian.PutUint16(extensionsLength, uint16(len(extensions)))

	segments := [][]byte{
		{0x01},             // handshake type
		{0x00, 0x01, 0xfc}, // length 508
		{0x03, 0x03},       // client version
		hd.random,          // random
		{0x20},             // session id length 32
		hd.sessionId,       // session id
		{0x00, 0x24},       // cipher suites length 36
		suites,             // cipher suites
		{0x01},             // compression methods length 1
		{0x00},             // compression methods
		extensionsLength,   // extensions length
		extensions,         // extensions
	}
	for _, seg := range segments {
		ch = append(ch, seg...)
	}
	return ch
}

View File

@ -0,0 +1,20 @@
package client
import (
"bytes"
"encoding/hex"
"testing"
)
// TestComposeExtensions checks Firefox's composed extensions against a
// captured byte sequence, ignoring the randomly generated secp256r1 key
// share region (bytes 137..201).
func TestComposeExtensions(t *testing.T) {
	serverName := "consent.google.com"
	keyShare, _ := hex.DecodeString("6075db0a43812b2e4e0f44157f04295b484ccfc6d70e577c1e6113aa18e08827")
	encodedSNI := makeServerName(serverName)
	result := (&Firefox{}).composeExtensions(encodedSNI, keyShare)

	target, _ := hex.DecodeString("000000170015000012636f6e73656e742e676f6f676c652e636f6d00170000ff01000100000a000e000c001d00170018001901000101000b00020100002300000010000e000c02683208687474702f312e310005000501000000000033006b0069001d00206075db0a43812b2e4e0f44157f04295b484ccfc6d70e577c1e6113aa18e088270017004104948052ae52043e654641660ebbadb527c8280262e61f64b0f6f1794f32e1000865a49e4cbe2027c78e7180861e4336300815fa0f1b0091c4d788b97f809a47d3002b0009080304030303020301000d0018001604030503060308040805080604010501060102030201002d00020101001c000240010015008c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	// skip random secp256r1
	prefixMatches := bytes.Equal(result[:137], target[:137])
	suffixMatches := bytes.Equal(result[202:], target[202:])
	if !prefixMatches || !suffixMatches {
		t.Errorf("got %x", result)
	}
}

View File

@ -1,26 +1,40 @@
package client package client
import ( import (
"github.com/cbeuw/Cloak/internal/common"
"io" "io"
"net" "net"
"sync"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
func RouteUDP(bindFunc func() (*net.UDPConn, error), streamTimeout time.Duration, singleplex bool, newSeshFunc func() *mux.Session) { type ConnWithReadFromTimeout interface {
net.Conn
SetReadFromTimeout(d time.Duration)
}
type CloseSessionAfterCloseStream struct {
ConnWithReadFromTimeout
Session *mux.Session
}
func (s *CloseSessionAfterCloseStream) Close() error {
if err := s.ConnWithReadFromTimeout.Close(); err != nil {
return err
}
return s.Session.Close()
}
func RouteUDP(bindFunc func() (*net.UDPConn, error), streamTimeout time.Duration, newSeshFunc func() *mux.Session, useSessionPerConnection bool) {
var sesh *mux.Session var sesh *mux.Session
localConn, err := bindFunc() localConn, err := bindFunc()
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
streams := make(map[string]*mux.Stream) streams := make(map[string]ConnWithReadFromTimeout)
var streamsMutex sync.Mutex
data := make([]byte, 8192) data := make([]byte, 8192)
for { for {
@ -30,72 +44,67 @@ func RouteUDP(bindFunc func() (*net.UDPConn, error), streamTimeout time.Duration
continue continue
} }
if !singleplex && (sesh == nil || sesh.IsClosed()) { if !useSessionPerConnection && (sesh == nil || sesh.IsClosed()) {
sesh = newSeshFunc() sesh = newSeshFunc()
} }
streamsMutex.Lock() var stream ConnWithReadFromTimeout
stream, ok := streams[addr.String()] stream, ok := streams[addr.String()]
if !ok { if !ok {
if singleplex { connectionSession := sesh
sesh = newSeshFunc() if useSessionPerConnection {
connectionSession = newSeshFunc()
} }
stream, err = sesh.OpenStream() stream, err = connectionSession.OpenStream()
if err != nil { if err != nil {
if singleplex {
sesh.Close()
}
log.Errorf("Failed to open stream: %v", err) log.Errorf("Failed to open stream: %v", err)
streamsMutex.Unlock() if useSessionPerConnection {
connectionSession.Close()
}
continue continue
} }
if useSessionPerConnection {
stream = &CloseSessionAfterCloseStream{
ConnWithReadFromTimeout: stream,
Session: connectionSession,
}
}
streams[addr.String()] = stream streams[addr.String()] = stream
streamsMutex.Unlock()
_ = stream.SetReadDeadline(time.Now().Add(streamTimeout))
proxyAddr := addr proxyAddr := addr
go func(stream *mux.Stream, localConn *net.UDPConn) { go func() {
buf := make([]byte, 8192) buf := make([]byte, 8192)
for { for {
n, err := stream.Read(buf) n, err := stream.Read(buf)
if err != nil { if err != nil {
log.Tracef("copying stream to proxy client: %v", err) log.Tracef("copying stream to proxy client: %v", err)
break stream.Close()
return
} }
_ = stream.SetReadDeadline(time.Now().Add(streamTimeout))
_, err = localConn.WriteTo(buf[:n], proxyAddr) _, err = localConn.WriteTo(buf[:n], proxyAddr)
if err != nil { if err != nil {
log.Tracef("copying stream to proxy client: %v", err) log.Tracef("copying stream to proxy client: %v", err)
break stream.Close()
return
} }
} }
streamsMutex.Lock() }()
delete(streams, addr.String())
streamsMutex.Unlock()
stream.Close()
return
}(stream, localConn)
} else {
streamsMutex.Unlock()
} }
_, err = stream.Write(data[:i]) _, err = stream.Write(data[:i])
if err != nil { if err != nil {
log.Tracef("copying proxy client to stream: %v", err) log.Tracef("copying proxy client to stream: %v", err)
streamsMutex.Lock()
delete(streams, addr.String()) delete(streams, addr.String())
streamsMutex.Unlock()
stream.Close() stream.Close()
continue continue
} }
_ = stream.SetReadDeadline(time.Now().Add(streamTimeout))
} }
} }
func RouteTCP(listener net.Listener, streamTimeout time.Duration, singleplex bool, newSeshFunc func() *mux.Session) { func RouteTCP(listener net.Listener, streamTimeout time.Duration, newSeshFunc func() *mux.Session, useSessionPerConnection bool) {
var sesh *mux.Session var sesh *mux.Session
for { for {
localConn, err := listener.Accept() localConn, err := listener.Accept()
@ -103,35 +112,41 @@ func RouteTCP(listener net.Listener, streamTimeout time.Duration, singleplex boo
log.Fatal(err) log.Fatal(err)
continue continue
} }
if !singleplex && (sesh == nil || sesh.IsClosed()) { if !useSessionPerConnection && (sesh == nil || sesh.IsClosed()) {
sesh = newSeshFunc() sesh = newSeshFunc()
} }
go func(sesh *mux.Session, localConn net.Conn, timeout time.Duration) { go func() {
if singleplex {
sesh = newSeshFunc()
}
data := make([]byte, 10240) data := make([]byte, 10240)
_ = localConn.SetReadDeadline(time.Now().Add(streamTimeout))
i, err := io.ReadAtLeast(localConn, data, 1) i, err := io.ReadAtLeast(localConn, data, 1)
if err != nil { if err != nil {
log.Errorf("Failed to read first packet from proxy client: %v", err) log.Errorf("Failed to read first packet from proxy client: %v", err)
localConn.Close() localConn.Close()
return return
} }
var zeroTime time.Time
_ = localConn.SetReadDeadline(zeroTime)
stream, err := sesh.OpenStream() connectionSession := sesh
if useSessionPerConnection {
connectionSession = newSeshFunc()
}
var stream ConnWithReadFromTimeout
stream, err = connectionSession.OpenStream()
if err != nil { if err != nil {
log.Errorf("Failed to open stream: %v", err) log.Errorf("Failed to open stream: %v", err)
localConn.Close() localConn.Close()
if singleplex { if useSessionPerConnection {
sesh.Close() connectionSession.Close()
} }
return return
} }
if useSessionPerConnection {
stream = &CloseSessionAfterCloseStream{
ConnWithReadFromTimeout: stream,
Session: connectionSession,
}
}
_, err = stream.Write(data[:i]) _, err = stream.Write(data[:i])
if err != nil { if err != nil {
log.Errorf("Failed to write to stream: %v", err) log.Errorf("Failed to write to stream: %v", err)
@ -140,6 +155,7 @@ func RouteTCP(listener net.Listener, streamTimeout time.Duration, singleplex boo
return return
} }
stream.SetReadFromTimeout(streamTimeout) // if localConn hasn't sent anything to stream to a period of time, stream closes
go func() { go func() {
if _, err := common.Copy(localConn, stream); err != nil { if _, err := common.Copy(localConn, stream); err != nil {
log.Tracef("copying stream to proxy client: %v", err) log.Tracef("copying stream to proxy client: %v", err)
@ -148,6 +164,7 @@ func RouteTCP(listener net.Listener, streamTimeout time.Duration, singleplex boo
if _, err = common.Copy(stream, localConn); err != nil { if _, err = common.Copy(stream, localConn); err != nil {
log.Tracef("copying proxy client to stream: %v", err) log.Tracef("copying proxy client to stream: %v", err)
} }
}(sesh, localConn, streamTimeout) }()
} }
} }

View File

@ -4,22 +4,21 @@ import (
"crypto" "crypto"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus"
"io/ioutil" "io/ioutil"
"net" "net"
"strings" "strings"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
) )
// RawConfig represents the fields in the config json file // RawConfig represents the fields in the config json file
// nullable means if it's empty, a default value will be chosen in ProcessRawConfig // nullable means if it's empty, a default value will be chosen in SplitConfigs
// jsonOptional means if the json's empty, its value will be set from environment variables or commandline args // jsonOptional means if the json's empty, its value will be set from environment variables or commandline args
// but it mustn't be empty when ProcessRawConfig is called // but it mustn't be empty when SplitConfigs is called
type RawConfig struct { type RawConfig struct {
ServerName string ServerName string
ProxyMethod string ProxyMethod string
@ -27,33 +26,29 @@ type RawConfig struct {
UID []byte UID []byte
PublicKey []byte PublicKey []byte
NumConn int NumConn int
LocalHost string // jsonOptional LocalHost string // jsonOptional
LocalPort string // jsonOptional LocalPort string // jsonOptional
RemoteHost string // jsonOptional RemoteHost string // jsonOptional
RemotePort string // jsonOptional RemotePort string // jsonOptional
AlternativeNames []string // jsonOptional
// defaults set in ProcessRawConfig // defaults set in SplitConfigs
UDP bool // nullable UDP bool // nullable
BrowserSig string // nullable BrowserSig string // nullable
Transport string // nullable Transport string // nullable
CDNOriginHost string // nullable
CDNWsUrlPath string // nullable
StreamTimeout int // nullable StreamTimeout int // nullable
KeepAlive int // nullable KeepAlive int // nullable
} }
type RemoteConnConfig struct { type RemoteConnConfig struct {
Singleplex bool NumConn int
NumConn int KeepAlive time.Duration
KeepAlive time.Duration RemoteAddr string
RemoteAddr string TransportMaker func() Transport
Transport TransportConfig
} }
type LocalConnConfig struct { type LocalConnConfig struct {
LocalAddr string LocalAddr string
Timeout time.Duration Timeout time.Duration
MockDomainList []string
} }
type AuthInfo struct { type AuthInfo struct {
@ -97,20 +92,6 @@ func ssvToJson(ssv string) (ret []byte) {
} }
key := sp[0] key := sp[0]
value := sp[1] value := sp[1]
if strings.HasPrefix(key, "AlternativeNames") {
switch strings.Contains(value, ",") {
case true:
domains := strings.Split(value, ",")
for index, domain := range domains {
domains[index] = `"` + domain + `"`
}
value = strings.Join(domains, ",")
ret = append(ret, []byte(`"`+key+`":[`+value+`],`)...)
case false:
ret = append(ret, []byte(`"`+key+`":["`+value+`"],`)...)
}
continue
}
// JSON doesn't like quotation marks around int and bool // JSON doesn't like quotation marks around int and bool
// This is extremely ugly but it's still better than writing a tokeniser // This is extremely ugly but it's still better than writing a tokeniser
if elem(key, unquoted) { if elem(key, unquoted) {
@ -144,7 +125,7 @@ func ParseConfig(conf string) (raw *RawConfig, err error) {
return return
} }
func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local LocalConnConfig, remote RemoteConnConfig, auth AuthInfo, err error) { func (raw *RawConfig) SplitConfigs(worldState common.WorldState) (local LocalConnConfig, remote RemoteConnConfig, auth AuthInfo, err error) {
nullErr := func(field string) (local LocalConnConfig, remote RemoteConnConfig, auth AuthInfo, err error) { nullErr := func(field string) (local LocalConnConfig, remote RemoteConnConfig, auth AuthInfo, err error) {
err = fmt.Errorf("%v cannot be empty", field) err = fmt.Errorf("%v cannot be empty", field)
return return
@ -156,17 +137,6 @@ func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local Loca
return nullErr("ServerName") return nullErr("ServerName")
} }
auth.MockDomain = raw.ServerName auth.MockDomain = raw.ServerName
var filteredAlternativeNames []string
for _, alternativeName := range raw.AlternativeNames {
if len(alternativeName) > 0 {
filteredAlternativeNames = append(filteredAlternativeNames, alternativeName)
}
}
raw.AlternativeNames = filteredAlternativeNames
local.MockDomainList = raw.AlternativeNames
local.MockDomainList = append(local.MockDomainList, auth.MockDomain)
if raw.ProxyMethod == "" { if raw.ProxyMethod == "" {
return nullErr("ServerName") return nullErr("ServerName")
} }
@ -190,13 +160,11 @@ func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local Loca
// Encryption method // Encryption method
switch strings.ToLower(raw.EncryptionMethod) { switch strings.ToLower(raw.EncryptionMethod) {
case "plain": case "plain":
auth.EncryptionMethod = mux.EncryptionMethodPlain auth.EncryptionMethod = mux.E_METHOD_PLAIN
case "aes-gcm", "aes-256-gcm": case "aes-gcm":
auth.EncryptionMethod = mux.EncryptionMethodAES256GCM auth.EncryptionMethod = mux.E_METHOD_AES_GCM
case "aes-128-gcm":
auth.EncryptionMethod = mux.EncryptionMethodAES128GCM
case "chacha20-poly1305": case "chacha20-poly1305":
auth.EncryptionMethod = mux.EncryptionMethodChaha20Poly1305 auth.EncryptionMethod = mux.E_METHOD_CHACHA20_POLY1305
default: default:
err = fmt.Errorf("unknown encryption method %v", raw.EncryptionMethod) err = fmt.Errorf("unknown encryption method %v", raw.EncryptionMethod)
return return
@ -210,29 +178,17 @@ func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local Loca
} }
remote.RemoteAddr = net.JoinHostPort(raw.RemoteHost, raw.RemotePort) remote.RemoteAddr = net.JoinHostPort(raw.RemoteHost, raw.RemotePort)
if raw.NumConn <= 0 { if raw.NumConn <= 0 {
remote.NumConn = 1 raw.NumConn = 0
remote.Singleplex = true
} else {
remote.NumConn = raw.NumConn
remote.Singleplex = false
} }
remote.NumConn = raw.NumConn
// Transport and (if TLS mode), browser // Transport and (if TLS mode), browser
switch strings.ToLower(raw.Transport) { switch strings.ToLower(raw.Transport) {
case "cdn": case "cdn":
var cdnDomainPort string remote.TransportMaker = func() Transport {
if raw.CDNOriginHost == "" { return &WSOverTLS{
cdnDomainPort = net.JoinHostPort(raw.RemoteHost, raw.RemotePort) cdnDomainPort: remote.RemoteAddr,
} else { }
cdnDomainPort = net.JoinHostPort(raw.CDNOriginHost, raw.RemotePort)
}
if raw.CDNWsUrlPath == "" {
raw.CDNWsUrlPath = "/"
}
remote.Transport = TransportConfig{
mode: "cdn",
wsUrl: "ws://" + cdnDomainPort + raw.CDNWsUrlPath,
} }
case "direct": case "direct":
fallthrough fallthrough
@ -240,17 +196,16 @@ func (raw *RawConfig) ProcessRawConfig(worldState common.WorldState) (local Loca
var browser browser var browser browser
switch strings.ToLower(raw.BrowserSig) { switch strings.ToLower(raw.BrowserSig) {
case "firefox": case "firefox":
browser = firefox browser = &Firefox{}
case "safari":
browser = safari
case "chrome": case "chrome":
fallthrough fallthrough
default: default:
browser = chrome browser = &Chrome{}
} }
remote.Transport = TransportConfig{ remote.TransportMaker = func() Transport {
mode: "direct", return &DirectTLS{
browser: browser, browser: browser,
}
} }
} }

View File

@ -1,37 +1,20 @@
package client package client
import ( import (
"io/ioutil" "bytes"
"testing" "testing"
"github.com/stretchr/testify/assert"
) )
func TestParseConfig(t *testing.T) { func TestSSVtoJson(t *testing.T) {
ssv := "UID=iGAO85zysIyR4c09CyZSLdNhtP/ckcYu7nIPI082AHA=;PublicKey=IYoUzkle/T/kriE+Ufdm7AHQtIeGnBWbhhlTbmDpUUI=;" + ssv := "UID=iGAO85zysIyR4c09CyZSLdNhtP/ckcYu7nIPI082AHA=;PublicKey=IYoUzkle/T/kriE+Ufdm7AHQtIeGnBWbhhlTbmDpUUI=;ServerName=www.bing.com;NumConn=4;MaskBrowser=chrome;"
"ServerName=www.bing.com;NumConn=4;MaskBrowser=chrome;ProxyMethod=shadowsocks;EncryptionMethod=plain"
json := ssvToJson(ssv) json := ssvToJson(ssv)
expected := []byte(`{"UID":"iGAO85zysIyR4c09CyZSLdNhtP/ckcYu7nIPI082AHA=","PublicKey":"IYoUzkle/T/kriE+Ufdm7AHQtIeGnBWbhhlTbmDpUUI=","ServerName":"www.bing.com","NumConn":4,"MaskBrowser":"chrome","ProxyMethod":"shadowsocks","EncryptionMethod":"plain"}`) expected := []byte(`{"UID":"iGAO85zysIyR4c09CyZSLdNhtP/ckcYu7nIPI082AHA=","PublicKey":"IYoUzkle/T/kriE+Ufdm7AHQtIeGnBWbhhlTbmDpUUI=","ServerName":"www.bing.com","NumConn":4,"MaskBrowser":"chrome"}`)
if !bytes.Equal(expected, json) {
t.Run("byte equality", func(t *testing.T) { t.Error(
assert.Equal(t, expected, json) "For", "ssvToJson",
}) "expecting", string(expected),
"got", string(json),
t.Run("struct equality", func(t *testing.T) { )
tmpConfig, _ := ioutil.TempFile("", "ck_client_config") }
_, _ = tmpConfig.Write(expected)
parsedFromSSV, err := ParseConfig(ssv)
assert.NoError(t, err)
parsedFromJson, err := ParseConfig(tmpConfig.Name())
assert.NoError(t, err)
assert.Equal(t, parsedFromJson, parsedFromSSV)
})
t.Run("empty file", func(t *testing.T) {
tmpConfig, _ := ioutil.TempFile("", "ck_client_config")
_, err := ParseConfig(tmpConfig.Name())
assert.Error(t, err)
})
} }

View File

@ -8,26 +8,3 @@ type Transport interface {
Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error)
net.Conn net.Conn
} }
type TransportConfig struct {
mode string
wsUrl string
browser browser
}
func (t TransportConfig) CreateTransport() Transport {
switch t.mode {
case "cdn":
return &WSOverTLS{
wsUrl: t.wsUrl,
}
case "direct":
return &DirectTLS{
browser: t.browser,
}
default:
return nil
}
}

View File

@ -4,18 +4,17 @@ import (
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
"net"
"net/http"
"net/url"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/gorilla/websocket" "github.com/gorilla/websocket"
utls "github.com/refraction-networking/utls" utls "github.com/refraction-networking/utls"
"net"
"net/http"
"net/url"
) )
type WSOverTLS struct { type WSOverTLS struct {
*common.WebSocketConn *common.WebSocketConn
wsUrl string cdnDomainPort string
} }
func (ws *WSOverTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) { func (ws *WSOverTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey [32]byte, err error) {
@ -24,24 +23,12 @@ func (ws *WSOverTLS) Handshake(rawConn net.Conn, authInfo AuthInfo) (sessionKey
InsecureSkipVerify: true, InsecureSkipVerify: true,
} }
uconn := utls.UClient(rawConn, utlsConfig, utls.HelloChrome_Auto) uconn := utls.UClient(rawConn, utlsConfig, utls.HelloChrome_Auto)
err = uconn.BuildHandshakeState()
if err != nil {
return
}
for i, extension := range uconn.Extensions {
_, ok := extension.(*utls.ALPNExtension)
if ok {
uconn.Extensions = append(uconn.Extensions[:i], uconn.Extensions[i+1:]...)
break
}
}
err = uconn.Handshake() err = uconn.Handshake()
if err != nil { if err != nil {
return return
} }
u, err := url.Parse(ws.wsUrl) u, err := url.Parse("ws://" + ws.cdnDomainPort)
if err != nil { if err != nil {
return sessionKey, fmt.Errorf("failed to parse ws url: %v", err) return sessionKey, fmt.Errorf("failed to parse ws url: %v", err)
} }

View File

@ -4,9 +4,7 @@ import (
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/rand" "crypto/rand"
"errors"
"io" "io"
"math/big"
"time" "time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -21,11 +19,6 @@ func AESGCMEncrypt(nonce []byte, key []byte, plaintext []byte) ([]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(nonce) != aesgcm.NonceSize() {
// check here so it doesn't panic
return nil, errors.New("incorrect nonce size")
}
return aesgcm.Seal(nil, nonce, plaintext, nil), nil return aesgcm.Seal(nil, nonce, plaintext, nil), nil
} }
@ -38,10 +31,6 @@ func AESGCMDecrypt(nonce []byte, key []byte, ciphertext []byte) ([]byte, error)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(nonce) != aesgcm.NonceSize() {
// check here so it doesn't panic
return nil, errors.New("incorrect nonce size")
}
plain, err := aesgcm.Open(nil, nonce, ciphertext, nil) plain, err := aesgcm.Open(nil, nonce, ciphertext, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@ -53,8 +42,8 @@ func CryptoRandRead(buf []byte) {
RandRead(rand.Reader, buf) RandRead(rand.Reader, buf)
} }
func backoff(f func() error) { func RandRead(randSource io.Reader, buf []byte) {
err := f() _, err := randSource.Read(buf)
if err == nil { if err == nil {
return return
} }
@ -62,36 +51,12 @@ func backoff(f func() error) {
100 * time.Millisecond, 300 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 100 * time.Millisecond, 300 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second,
3 * time.Second, 5 * time.Second} 3 * time.Second, 5 * time.Second}
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
log.Errorf("Failed to get random: %v. Retrying...", err) log.Errorf("Failed to get cryptographic random bytes: %v. Retrying...", err)
err = f() _, err = rand.Read(buf)
if err == nil { if err == nil {
return return
} }
time.Sleep(waitDur[i]) time.Sleep(time.Millisecond * waitDur[i])
} }
log.Fatal("Cannot get random after 10 retries") log.Fatal("Cannot get cryptographic random bytes after 10 retries")
}
func RandRead(randSource io.Reader, buf []byte) {
backoff(func() error {
_, err := randSource.Read(buf)
return err
})
}
func RandItem[T any](list []T) T {
return list[RandInt(len(list))]
}
func RandInt(n int) int {
s := new(int)
backoff(func() error {
size, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
if err != nil {
return err
}
*s = int(size.Int64())
return nil
})
return *s
} }

View File

@ -1,95 +0,0 @@
package common
import (
"bytes"
"encoding/hex"
"errors"
"io"
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
)
const gcmTagSize = 16
func TestAESGCM(t *testing.T) {
// test vectors from https://luca-giuzzi.unibs.it/corsi/Support/papers-cryptography/gcm-spec.pdf
t.Run("correct 128", func(t *testing.T) {
key, _ := hex.DecodeString("00000000000000000000000000000000")
plaintext, _ := hex.DecodeString("")
nonce, _ := hex.DecodeString("000000000000000000000000")
ciphertext, _ := hex.DecodeString("")
tag, _ := hex.DecodeString("58e2fccefa7e3061367f1d57a4e7455a")
encryptedWithTag, err := AESGCMEncrypt(nonce, key, plaintext)
assert.NoError(t, err)
assert.Equal(t, ciphertext, encryptedWithTag[:len(plaintext)])
assert.Equal(t, tag, encryptedWithTag[len(plaintext):len(plaintext)+gcmTagSize])
decrypted, err := AESGCMDecrypt(nonce, key, encryptedWithTag)
assert.NoError(t, err)
// slight inconvenience here that assert.Equal does not consider a nil slice and an empty slice to be
// equal. decrypted should be []byte(nil) but plaintext is []byte{}
assert.True(t, bytes.Equal(plaintext, decrypted))
})
t.Run("bad key size", func(t *testing.T) {
key, _ := hex.DecodeString("0000000000000000000000000000")
plaintext, _ := hex.DecodeString("")
nonce, _ := hex.DecodeString("000000000000000000000000")
ciphertext, _ := hex.DecodeString("")
tag, _ := hex.DecodeString("58e2fccefa7e3061367f1d57a4e7455a")
_, err := AESGCMEncrypt(nonce, key, plaintext)
assert.Error(t, err)
_, err = AESGCMDecrypt(nonce, key, append(ciphertext, tag...))
assert.Error(t, err)
})
t.Run("bad nonce size", func(t *testing.T) {
key, _ := hex.DecodeString("00000000000000000000000000000000")
plaintext, _ := hex.DecodeString("")
nonce, _ := hex.DecodeString("00000000000000000000")
ciphertext, _ := hex.DecodeString("")
tag, _ := hex.DecodeString("58e2fccefa7e3061367f1d57a4e7455a")
_, err := AESGCMEncrypt(nonce, key, plaintext)
assert.Error(t, err)
_, err = AESGCMDecrypt(nonce, key, append(ciphertext, tag...))
assert.Error(t, err)
})
t.Run("bad tag", func(t *testing.T) {
key, _ := hex.DecodeString("00000000000000000000000000000000")
nonce, _ := hex.DecodeString("00000000000000000000")
ciphertext, _ := hex.DecodeString("")
tag, _ := hex.DecodeString("fffffccefa7e3061367f1d57a4e745ff")
_, err := AESGCMDecrypt(nonce, key, append(ciphertext, tag...))
assert.Error(t, err)
})
}
type failingReader struct {
fails int
reader io.Reader
}
func (f *failingReader) Read(p []byte) (n int, err error) {
if f.fails > 0 {
f.fails -= 1
return 0, errors.New("no data for you yet")
} else {
return f.reader.Read(p)
}
}
func TestRandRead(t *testing.T) {
failer := &failingReader{
fails: 3,
reader: rand.New(rand.NewSource(0)),
}
readBuf := make([]byte, 10)
RandRead(failer, readBuf)
assert.NotEqual(t, [10]byte{}, readBuf)
}

View File

@ -2,10 +2,8 @@ package common
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"io" "io"
"net" "net"
"sync"
"time" "time"
) )
@ -17,15 +15,17 @@ const (
Handshake = 22 Handshake = 22
ApplicationData = 23 ApplicationData = 23
initialWriteBufSize = 14336
) )
func AddRecordLayer(input []byte, typ byte, ver uint16) []byte { func AddRecordLayer(input []byte, typ byte, ver uint16) []byte {
msgLen := len(input) msgLen := len(input)
retLen := msgLen + recordLayerLength retLen := msgLen + recordLayerLength
var ret []byte var ret []byte
ret = make([]byte, retLen) if cap(input) >= retLen {
ret = input[:retLen]
} else {
ret = make([]byte, retLen)
}
copy(ret[recordLayerLength:], input) copy(ret[recordLayerLength:], input)
ret[0] = typ ret[0] = typ
ret[1] = byte(ver >> 8) ret[1] = byte(ver >> 8)
@ -37,18 +37,6 @@ func AddRecordLayer(input []byte, typ byte, ver uint16) []byte {
type TLSConn struct { type TLSConn struct {
net.Conn net.Conn
writeBufPool sync.Pool
}
func NewTLSConn(conn net.Conn) *TLSConn {
return &TLSConn{
Conn: conn,
writeBufPool: sync.Pool{New: func() interface{} {
b := make([]byte, 0, initialWriteBufSize)
b = append(b, ApplicationData, byte(VersionTLS13>>8), byte(VersionTLS13&0xFF))
return &b
}},
}
} }
func (tls *TLSConn) LocalAddr() net.Addr { func (tls *TLSConn) LocalAddr() net.Addr {
@ -76,9 +64,6 @@ func (tls *TLSConn) Read(buffer []byte) (n int, err error) {
// a single message can also be segmented due to MTU of the IP layer. // a single message can also be segmented due to MTU of the IP layer.
// This function guareentees a single TLS message to be read and everything // This function guareentees a single TLS message to be read and everything
// else is left in the buffer. // else is left in the buffer.
if len(buffer) < recordLayerLength {
return 0, io.ErrShortBuffer
}
_, err = io.ReadFull(tls.Conn, buffer[:recordLayerLength]) _, err = io.ReadFull(tls.Conn, buffer[:recordLayerLength])
if err != nil { if err != nil {
return return
@ -94,16 +79,9 @@ func (tls *TLSConn) Read(buffer []byte) (n int, err error) {
} }
func (tls *TLSConn) Write(in []byte) (n int, err error) { func (tls *TLSConn) Write(in []byte) (n int, err error) {
msgLen := len(in) // TODO: write record layer directly first?
if msgLen > 1<<14+256 { // https://tools.ietf.org/html/rfc8446#section-5.2 toWrite := AddRecordLayer(in, ApplicationData, VersionTLS13)
return 0, errors.New("message is too long") n, err = tls.Conn.Write(toWrite)
}
writeBuf := tls.writeBufPool.Get().(*[]byte)
*writeBuf = append(*writeBuf, byte(msgLen>>8), byte(msgLen&0xFF))
*writeBuf = append(*writeBuf, in...)
n, err = tls.Conn.Write(*writeBuf)
*writeBuf = (*writeBuf)[:3]
tls.writeBufPool.Put(writeBuf)
return n - recordLayerLength, err return n - recordLayerLength, err
} }

View File

@ -1,40 +0,0 @@
package common
import (
"net"
"testing"
)
func BenchmarkTLSConn_Write(b *testing.B) {
const bufSize = 16 * 1024
addrCh := make(chan string, 1)
go func() {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
b.Fatal(err)
}
addrCh <- listener.Addr().String()
conn, err := listener.Accept()
if err != nil {
b.Fatal(err)
}
readBuf := make([]byte, bufSize*2)
for {
_, err = conn.Read(readBuf)
if err != nil {
return
}
}
}()
data := make([]byte, bufSize)
discardConn, _ := net.Dial("tcp", <-addrCh)
tlsConn := NewTLSConn(discardConn)
defer tlsConn.Close()
b.SetBytes(bufSize)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tlsConn.Write(data)
}
})
}

View File

@ -2,11 +2,10 @@ package common
import ( import (
"errors" "errors"
"github.com/gorilla/websocket"
"io" "io"
"sync" "sync"
"time" "time"
"github.com/gorilla/websocket"
) )
// WebSocketConn implements io.ReadWriteCloser // WebSocketConn implements io.ReadWriteCloser

View File

@ -68,11 +68,13 @@ func Unmarshal(data []byte) (crypto.PublicKey, bool) {
return &pub, true return &pub, true
} }
func GenerateSharedSecret(privKey crypto.PrivateKey, pubKey crypto.PublicKey) ([]byte, error) { func GenerateSharedSecret(privKey crypto.PrivateKey, pubKey crypto.PublicKey) []byte {
var priv, pub *[32]byte var priv, pub, secret *[32]byte
priv = privKey.(*[32]byte) priv = privKey.(*[32]byte)
pub = pubKey.(*[32]byte) pub = pubKey.(*[32]byte)
secret = new([32]byte)
return curve25519.X25519(priv[:], pub[:]) curve25519.ScalarMult(secret, priv, pub)
return secret[:]
} }

View File

@ -32,7 +32,6 @@ import (
"bytes" "bytes"
"crypto" "crypto"
"crypto/rand" "crypto/rand"
"io"
"testing" "testing"
) )
@ -40,20 +39,6 @@ func TestCurve25519(t *testing.T) {
testECDH(t) testECDH(t)
} }
func TestErrors(t *testing.T) {
reader, writer := io.Pipe()
_ = writer.Close()
_, _, err := GenerateKey(reader)
if err == nil {
t.Error("GenerateKey should return error")
}
_, ok := Unmarshal([]byte{1})
if ok {
t.Error("Unmarshal should return false")
}
}
func BenchmarkCurve25519(b *testing.B) { func BenchmarkCurve25519(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
testECDH(b) testECDH(b)
@ -90,11 +75,11 @@ func testECDH(t testing.TB) {
t.Fatalf("Unmarshal does not work") t.Fatalf("Unmarshal does not work")
} }
secret1, err = GenerateSharedSecret(privKey1, pubKey2) secret1 = GenerateSharedSecret(privKey1, pubKey2)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
secret2, err = GenerateSharedSecret(privKey2, pubKey1) secret2 = GenerateSharedSecret(privKey2, pubKey1)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }

View File

@ -0,0 +1,146 @@
// This is based on https://github.com/golang/go/blob/0436b162397018c45068b47ca1b5924a3eafdee0/src/net/net_fake.go#L173
package multiplex
import (
"bytes"
"errors"
"io"
"sync"
"time"
)
// BUF_SIZE_LIMIT bounds how much data a pipe will buffer: (1 << 20) * 500
// bytes, i.e. 500 MiB. Write blocks once the buffer exceeds this.
const BUF_SIZE_LIMIT = 1 << 20 * 500

// ErrTimeout is returned by Read/WriteTo once a read deadline set via
// SetReadDeadline has passed.
var ErrTimeout = errors.New("deadline exceeded")
// The point of a bufferedPipe is that Read() will block until data is available.
// All fields are guarded by rwCond.L; goroutines blocked in Read, Write or
// WriteTo are woken with rwCond.Broadcast.
type bufferedPipe struct {
	// only allocated on the first Read or Write
	buf *bytes.Buffer

	closed    bool           // set by Close; Read drains remaining data then returns io.EOF
	rwCond    *sync.Cond
	rDeadline time.Time      // absolute read deadline; zero means no deadline
	wtTimeout time.Duration  // per-iteration inactivity timeout used by WriteTo; 0 disables
}
// NewBufferedPipe returns a ready-to-use pipe. The backing byte buffer is
// allocated lazily by the first Read or Write call.
func NewBufferedPipe() *bufferedPipe {
	return &bufferedPipe{rwCond: sync.NewCond(&sync.Mutex{})}
}
// Read copies up to len(target) bytes out of the pipe. It blocks until at
// least one byte is available, the pipe is closed and drained (io.EOF), or
// the deadline set via SetReadDeadline expires (ErrTimeout).
func (p *bufferedPipe) Read(target []byte) (int, error) {
	p.rwCond.L.Lock()
	defer p.rwCond.L.Unlock()
	if p.buf == nil {
		p.buf = new(bytes.Buffer)
	}
	for {
		// a closed pipe still yields buffered data; EOF only once drained
		if p.closed && p.buf.Len() == 0 {
			return 0, io.EOF
		}
		if !p.rDeadline.IsZero() {
			d := time.Until(p.rDeadline)
			if d <= 0 {
				return 0, ErrTimeout
			}
			// schedule a wake-up so the Wait below re-checks the deadline;
			// NOTE(review): a new timer is armed on every loop iteration — confirm intended
			time.AfterFunc(d, p.rwCond.Broadcast)
		}
		if p.buf.Len() > 0 {
			break
		}
		p.rwCond.Wait()
	}
	n, err := p.buf.Read(target)
	// err will always be nil because we have already verified that buf.Len() != 0
	p.rwCond.Broadcast()
	return n, err
}
// WriteTo keeps draining the pipe into w until the pipe is closed and empty
// (io.EOF), the read deadline passes (ErrTimeout), or w returns an error.
// When wtTimeout is non-zero the deadline is refreshed each iteration, making
// the timeout effectively "time since last activity". It never returns a nil
// error. NOTE(review): on EOF/timeout it returns 0 rather than the n written
// so far — confirm callers ignore the count on those paths.
func (p *bufferedPipe) WriteTo(w io.Writer) (n int64, err error) {
	p.rwCond.L.Lock()
	defer p.rwCond.L.Unlock()
	if p.buf == nil {
		p.buf = new(bytes.Buffer)
	}
	for {
		if p.closed && p.buf.Len() == 0 {
			return 0, io.EOF
		}
		if !p.rDeadline.IsZero() {
			d := time.Until(p.rDeadline)
			if d <= 0 {
				return 0, ErrTimeout
			}
			if p.wtTimeout == 0 {
				// if there hasn't been a scheduled broadcast
				time.AfterFunc(d, p.rwCond.Broadcast)
			}
		}
		if p.wtTimeout != 0 {
			// refresh the deadline and arm a wake-up for the inactivity timeout
			p.rDeadline = time.Now().Add(p.wtTimeout)
			time.AfterFunc(p.wtTimeout, p.rwCond.Broadcast)
		}
		if p.buf.Len() > 0 {
			written, er := p.buf.WriteTo(w)
			n += written
			if er != nil {
				p.rwCond.Broadcast()
				return n, er
			}
			p.rwCond.Broadcast()
		} else {
			p.rwCond.Wait()
		}
	}
}
// Write appends input to the pipe's buffer, blocking while the buffer holds
// more than BUF_SIZE_LIMIT bytes. It returns io.ErrClosedPipe if the pipe has
// been closed, and otherwise reports len(input) written with a nil error.
func (p *bufferedPipe) Write(input []byte) (int, error) {
	p.rwCond.L.Lock()
	defer p.rwCond.L.Unlock()
	if p.buf == nil {
		p.buf = new(bytes.Buffer)
	}
	for {
		if p.closed {
			return 0, io.ErrClosedPipe
		}
		if p.buf.Len() <= BUF_SIZE_LIMIT {
			// if p.buf gets too large, write() will panic. We don't want this to happen
			break
		}
		p.rwCond.Wait()
	}
	n, err := p.buf.Write(input)
	// err will always be nil
	p.rwCond.Broadcast()
	return n, err
}
// Close marks the pipe closed and wakes every goroutine blocked on it.
// Data already buffered remains readable; the returned error is always nil.
func (p *bufferedPipe) Close() error {
	p.rwCond.L.Lock()
	p.closed = true
	p.rwCond.Broadcast()
	p.rwCond.L.Unlock()
	return nil
}
// SetReadDeadline sets the absolute time after which blocked Read/WriteTo
// calls give up with ErrTimeout. A zero time clears the deadline.
func (p *bufferedPipe) SetReadDeadline(t time.Time) {
	p.rwCond.L.Lock()
	p.rDeadline = t
	p.rwCond.L.Unlock()
	p.rwCond.Broadcast()
}
// SetWriteToTimeout sets the inactivity timeout used by WriteTo to refresh
// its deadline each iteration. A zero duration disables the behaviour.
func (p *bufferedPipe) SetWriteToTimeout(d time.Duration) {
	p.rwCond.L.Lock()
	p.wtTimeout = d
	p.rwCond.L.Unlock()
	p.rwCond.Broadcast()
}

View File

@ -0,0 +1,200 @@
package multiplex
import (
"bytes"
"math/rand"
"testing"
"time"
)
// TestPipeRW checks that one Write followed by a same-sized Read round-trips
// the bytes through a bufferedPipe unchanged.
func TestPipeRW(t *testing.T) {
	pipe := NewBufferedPipe()
	b := []byte{0x01, 0x02, 0x03}
	n, err := pipe.Write(b)
	if n != len(b) {
		t.Error(
			"For", "number of bytes written",
			"expecting", len(b),
			"got", n,
		)
		return
	}
	if err != nil {
		t.Error(
			"For", "simple write",
			"expecting", "nil error",
			"got", err,
		)
		return
	}
	b2 := make([]byte, len(b))
	n, err = pipe.Read(b2)
	if n != len(b) {
		t.Error(
			"For", "number of bytes read",
			"expecting", len(b),
			"got", n,
		)
		return
	}
	if err != nil {
		t.Error(
			"For", "simple read",
			"expecting", "nil error",
			"got", err,
		)
		return
	}
	if !bytes.Equal(b, b2) {
		t.Error(
			"For", "simple read",
			"expecting", b,
			"got", b2,
		)
	}
}
// TestReadBlock checks that Read blocks on an empty pipe and is woken by a
// Write arriving from another goroutine (after a 100ms delay).
func TestReadBlock(t *testing.T) {
	pipe := NewBufferedPipe()
	b := []byte{0x01, 0x02, 0x03}
	go func() {
		time.Sleep(100 * time.Millisecond)
		pipe.Write(b)
	}()
	b2 := make([]byte, len(b))
	n, err := pipe.Read(b2)
	if n != len(b) {
		t.Error(
			"For", "number of bytes read after block",
			"expecting", len(b),
			"got", n,
		)
		return
	}
	if err != nil {
		t.Error(
			"For", "blocked read",
			"expecting", "nil error",
			"got", err,
		)
		return
	}
	if !bytes.Equal(b, b2) {
		t.Error(
			"For", "blocked read",
			"expecting", b,
			"got", b2,
		)
		return
	}
}
// TestPartialRead checks that a 3-byte write can be consumed by two smaller
// reads (1 byte then 2 bytes) — the pipe is byte-oriented, not message-oriented.
func TestPartialRead(t *testing.T) {
	pipe := NewBufferedPipe()
	b := []byte{0x01, 0x02, 0x03}
	pipe.Write(b)
	b1 := make([]byte, 1)
	n, err := pipe.Read(b1)
	if n != len(b1) {
		t.Error(
			"For", "number of bytes in partial read of 1",
			"expecting", len(b1),
			"got", n,
		)
		return
	}
	if err != nil {
		t.Error(
			"For", "partial read of 1",
			"expecting", "nil error",
			"got", err,
		)
		return
	}
	if b1[0] != b[0] {
		t.Error(
			"For", "partial read of 1",
			"expecting", b[0],
			"got", b1[0],
		)
	}
	b2 := make([]byte, 2)
	n, err = pipe.Read(b2)
	if n != len(b2) {
		t.Error(
			"For", "number of bytes in partial read of 2",
			"expecting", len(b2),
			"got", n,
		)
	}
	if err != nil {
		t.Error(
			"For", "partial read of 2",
			"expecting", "nil error",
			"got", err,
		)
		return
	}
	if !bytes.Equal(b[1:], b2) {
		t.Error(
			"For", "partial read of 2",
			"expecting", b[1:],
			"got", b2,
		)
		return
	}
}
// TestReadAfterClose checks that data buffered before Close can still be read
// in full afterwards (Close does not discard pending data).
func TestReadAfterClose(t *testing.T) {
	pipe := NewBufferedPipe()
	b := []byte{0x01, 0x02, 0x03}
	pipe.Write(b)
	b2 := make([]byte, len(b))
	pipe.Close()
	n, err := pipe.Read(b2)
	if n != len(b) {
		t.Error(
			"For", "number of bytes read",
			"expecting", len(b),
			"got", n,
		)
	}
	if err != nil {
		t.Error(
			"For", "simple read",
			"expecting", "nil error",
			"got", err,
		)
		return
	}
	if !bytes.Equal(b, b2) {
		t.Error(
			"For", "simple read",
			"expecting", b,
			"got", b2,
		)
		return
	}
}
// BenchmarkBufferedPipe_RW measures Write throughput with a concurrent reader
// draining the pipe into a slightly smaller buffer.
// NOTE(review): the reader goroutine loops forever and leaks past the
// benchmark; harmless for `go test` but worth confirming.
func BenchmarkBufferedPipe_RW(b *testing.B) {
	const PAYLOAD_LEN = 1000
	testData := make([]byte, PAYLOAD_LEN)
	rand.Read(testData)
	pipe := NewBufferedPipe()
	smallBuf := make([]byte, PAYLOAD_LEN-10)
	go func() {
		for {
			pipe.Read(smallBuf)
		}
	}()
	b.SetBytes(int64(len(testData)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pipe.Write(testData)
	}
}

View File

@ -0,0 +1,161 @@
// This is based on https://github.com/golang/go/blob/0436b162397018c45068b47ca1b5924a3eafdee0/src/net/net_fake.go#L173
package multiplex
import (
"bytes"
"io"
"sync"
"time"
)
// datagramBuffer is the same as bufferedPipe with the exception that it's message-oriented,
// instead of byte-oriented. The integrity of datagrams written into this buffer is preserved:
// a datagram won't get chopped up into individual bytes. All fields are guarded by rwCond.L.
type datagramBuffer struct {
	pLens []int          // queued datagram lengths, in arrival order
	buf *bytes.Buffer    // backing bytes; allocated lazily on first use
	closed bool          // set by Close or by a closing frame in Write
	rwCond *sync.Cond
	wtTimeout time.Duration // per-iteration inactivity timeout used by WriteTo; 0 disables
	rDeadline time.Time     // absolute read deadline; zero means no deadline
}
// NewDatagramBuffer returns an empty, ready-to-use datagram buffer; the
// backing byte buffer is allocated lazily on first use.
func NewDatagramBuffer() *datagramBuffer {
	return &datagramBuffer{rwCond: sync.NewCond(&sync.Mutex{})}
}
// Read pops exactly one whole datagram into target, blocking until one is
// queued, the buffer is closed and empty (io.EOF), or the read deadline
// expires (ErrTimeout). If target is smaller than the next datagram it
// returns io.ErrShortBuffer and leaves the datagram queued.
func (d *datagramBuffer) Read(target []byte) (int, error) {
	d.rwCond.L.Lock()
	defer d.rwCond.L.Unlock()
	if d.buf == nil {
		d.buf = new(bytes.Buffer)
	}
	for {
		if d.closed && len(d.pLens) == 0 {
			return 0, io.EOF
		}
		if !d.rDeadline.IsZero() {
			delta := time.Until(d.rDeadline)
			if delta <= 0 {
				return 0, ErrTimeout
			}
			// schedule a wake-up so the Wait below re-checks the deadline
			time.AfterFunc(delta, d.rwCond.Broadcast)
		}
		if len(d.pLens) > 0 {
			break
		}
		d.rwCond.Wait()
	}
	dataLen := d.pLens[0]
	if len(target) < dataLen {
		return 0, io.ErrShortBuffer
	}
	d.pLens = d.pLens[1:]
	d.buf.Read(target[:dataLen])
	// err will always be nil because we have already verified that buf.Len() != 0
	d.rwCond.Broadcast()
	return dataLen, nil
}
// WriteTo drains whole datagrams into w (one w.Write per datagram) until the
// buffer is closed and empty (io.EOF), the deadline passes (ErrTimeout), or w
// errors. When wtTimeout is non-zero the deadline is refreshed each
// iteration. It never returns a nil error. NOTE(review): on EOF/timeout it
// returns 0 rather than the accumulated n — confirm callers ignore the count.
func (d *datagramBuffer) WriteTo(w io.Writer) (n int64, err error) {
	d.rwCond.L.Lock()
	defer d.rwCond.L.Unlock()
	if d.buf == nil {
		d.buf = new(bytes.Buffer)
	}
	for {
		if d.closed && len(d.pLens) == 0 {
			return 0, io.EOF
		}
		if !d.rDeadline.IsZero() {
			delta := time.Until(d.rDeadline)
			if delta <= 0 {
				return 0, ErrTimeout
			}
			if d.wtTimeout == 0 {
				// if there hasn't been a scheduled broadcast
				time.AfterFunc(delta, d.rwCond.Broadcast)
			}
		}
		if d.wtTimeout != 0 {
			// refresh the deadline and arm a wake-up for the inactivity timeout
			d.rDeadline = time.Now().Add(d.wtTimeout)
			time.AfterFunc(d.wtTimeout, d.rwCond.Broadcast)
		}
		if len(d.pLens) > 0 {
			var dataLen int
			dataLen, d.pLens = d.pLens[0], d.pLens[1:]
			written, er := w.Write(d.buf.Next(dataLen))
			n += int64(written)
			if er != nil {
				d.rwCond.Broadcast()
				return n, er
			}
			d.rwCond.Broadcast()
		} else {
			d.rwCond.Wait()
		}
	}
}
// Write queues f.Payload as one datagram, blocking while the buffer exceeds
// BUF_SIZE_LIMIT. A frame whose Closing field is not C_NOOP closes the buffer
// instead (its payload is discarded). toBeClosed reports that the buffer is
// (now) closed; err is io.ErrClosedPipe when writing to an already-closed buffer.
func (d *datagramBuffer) Write(f Frame) (toBeClosed bool, err error) {
	d.rwCond.L.Lock()
	defer d.rwCond.L.Unlock()
	if d.buf == nil {
		d.buf = new(bytes.Buffer)
	}
	for {
		if d.closed {
			return true, io.ErrClosedPipe
		}
		if d.buf.Len() <= BUF_SIZE_LIMIT {
			// if d.buf gets too large, write() will panic. We don't want this to happen
			break
		}
		d.rwCond.Wait()
	}
	if f.Closing != C_NOOP {
		// closing frame: mark closed and wake readers; payload is not queued
		d.closed = true
		d.rwCond.Broadcast()
		return true, nil
	}
	dataLen := len(f.Payload)
	d.pLens = append(d.pLens, dataLen)
	d.buf.Write(f.Payload)
	// err will always be nil
	d.rwCond.Broadcast()
	return false, nil
}
// Close marks the buffer closed and wakes every goroutine blocked on it.
// Queued datagrams remain readable; the returned error is always nil.
func (d *datagramBuffer) Close() error {
	d.rwCond.L.Lock()
	d.closed = true
	d.rwCond.Broadcast()
	d.rwCond.L.Unlock()
	return nil
}
// SetReadDeadline sets the absolute time after which blocked Read/WriteTo
// calls give up with ErrTimeout. A zero time clears the deadline.
func (d *datagramBuffer) SetReadDeadline(t time.Time) {
	d.rwCond.L.Lock()
	d.rDeadline = t
	d.rwCond.L.Unlock()
	d.rwCond.Broadcast()
}
// SetWriteToTimeout sets the inactivity timeout used by WriteTo to refresh
// its deadline each iteration. A zero duration disables the behaviour.
func (d *datagramBuffer) SetWriteToTimeout(t time.Duration) {
	d.rwCond.L.Lock()
	d.wtTimeout = t
	d.rwCond.L.Unlock()
	d.rwCond.Broadcast()
}

View File

@ -0,0 +1,140 @@
package multiplex
import (
"bytes"
"testing"
"time"
)
// TestDatagramBuffer_RW covers the basic Write/Read round-trip of one
// datagram, verifies the backing buffer is fully drained after the read, and
// checks that a closing frame (Closing != C_NOOP) closes the buffer.
func TestDatagramBuffer_RW(t *testing.T) {
	b := []byte{0x01, 0x02, 0x03}
	t.Run("simple write", func(t *testing.T) {
		pipe := NewDatagramBuffer()
		_, err := pipe.Write(Frame{Payload: b})
		if err != nil {
			t.Error(
				"expecting", "nil error",
				"got", err,
			)
			return
		}
	})
	t.Run("simple read", func(t *testing.T) {
		pipe := NewDatagramBuffer()
		_, _ = pipe.Write(Frame{Payload: b})
		b2 := make([]byte, len(b))
		n, err := pipe.Read(b2)
		if n != len(b) {
			t.Error(
				"For", "number of bytes read",
				"expecting", len(b),
				"got", n,
			)
			return
		}
		if err != nil {
			t.Error(
				"expecting", "nil error",
				"got", err,
			)
			return
		}
		if !bytes.Equal(b, b2) {
			t.Error(
				"expecting", b,
				"got", b2,
			)
		}
		if pipe.buf.Len() != 0 {
			t.Error("buf len is not 0 after finished reading")
			return
		}
	})
	t.Run("writing closing frame", func(t *testing.T) {
		pipe := NewDatagramBuffer()
		toBeClosed, err := pipe.Write(Frame{Closing: C_STREAM})
		if !toBeClosed {
			t.Error("should be to be closed")
		}
		if err != nil {
			t.Error(
				"expecting", "nil error",
				"got", err,
			)
			return
		}
		if !pipe.closed {
			t.Error("expecting closed pipe, not closed")
		}
	})
}
// TestDatagramBuffer_BlockingRead checks that Read blocks on an empty buffer
// and is woken by a Write arriving from another goroutine (after 100ms).
func TestDatagramBuffer_BlockingRead(t *testing.T) {
	pipe := NewDatagramBuffer()
	b := []byte{0x01, 0x02, 0x03}
	go func() {
		time.Sleep(100 * time.Millisecond)
		pipe.Write(Frame{Payload: b})
	}()
	b2 := make([]byte, len(b))
	n, err := pipe.Read(b2)
	if n != len(b) {
		t.Error(
			"For", "number of bytes read after block",
			"expecting", len(b),
			"got", n,
		)
		return
	}
	if err != nil {
		t.Error(
			"For", "blocked read",
			"expecting", "nil error",
			"got", err,
		)
		return
	}
	if !bytes.Equal(b, b2) {
		t.Error(
			"For", "blocked read",
			"expecting", b,
			"got", b2,
		)
		return
	}
}
// TestDatagramBuffer_CloseThenRead verifies that a datagram written before
// Close can still be read back afterwards.
func TestDatagramBuffer_CloseThenRead(t *testing.T) {
	pipe := NewDatagramBuffer()
	payload := []byte{0x01, 0x02, 0x03}
	pipe.Write(Frame{Payload: payload})
	readBuf := make([]byte, len(payload))
	pipe.Close()
	n, err := pipe.Read(readBuf)
	// note: a wrong byte count is reported but does not abort the test
	if n != len(payload) {
		t.Error(
			"For", "number of bytes read",
			"expecting", len(payload),
			"got", n,
		)
	}
	if err != nil {
		t.Error(
			"For", "simple read",
			"expecting", "nil error",
			"got", err,
		)
		return
	}
	if !bytes.Equal(payload, readBuf) {
		t.Error(
			"For", "simple read",
			"expecting", payload,
			"got", readBuf,
		)
	}
}

View File

@ -1,119 +0,0 @@
// This is based on https://github.com/golang/go/blob/0436b162397018c45068b47ca1b5924a3eafdee0/src/net/net_fake.go#L173
package multiplex
import (
"bytes"
"io"
"sync"
"time"
)
// datagramBufferedPipe is the same as streamBufferedPipe with the exception that it's message-oriented,
// instead of byte-oriented. The integrity of datagrams written into this buffer is preserved.
// it won't get chopped up into individual bytes
type datagramBufferedPipe struct {
	pLens        []int         // length of each queued datagram, FIFO; their bytes sit back-to-back in buf
	buf          *bytes.Buffer // concatenated payloads of all queued datagrams
	closed       bool          // set by Close or a closing frame; Read drains remaining datagrams then returns io.EOF
	rwCond       *sync.Cond    // guards every field above and below; broadcast on each state change
	wtTimeout    time.Duration // not referenced by the methods shown here — presumably a WriteTo timeout; TODO confirm against callers
	rDeadline    time.Time     // read deadline; the zero value means no deadline
	timeoutTimer *time.Timer   // pending timer armed by broadcastAfter to wake deadline-bound readers
}
// NewDatagramBufferedPipe constructs an empty datagram pipe ready for use.
func NewDatagramBufferedPipe() *datagramBufferedPipe {
	return &datagramBufferedPipe{
		rwCond: sync.NewCond(&sync.Mutex{}),
		buf:    new(bytes.Buffer),
	}
}
// Read copies the next whole datagram into target. It blocks until a datagram
// is available, the pipe is closed and drained (io.EOF), or the read deadline
// passes (ErrTimeout). If target is smaller than the next datagram it returns
// io.ErrShortBuffer without consuming anything.
func (d *datagramBufferedPipe) Read(target []byte) (int, error) {
	d.rwCond.L.Lock()
	defer d.rwCond.L.Unlock()
	for {
		if d.closed && len(d.pLens) == 0 {
			return 0, io.EOF
		}
		hasRDeadline := !d.rDeadline.IsZero()
		if hasRDeadline {
			if time.Until(d.rDeadline) <= 0 {
				return 0, ErrTimeout
			}
		}
		if len(d.pLens) > 0 {
			break
		}
		// Nothing to read yet: arm a wake-up at the deadline so the timeout
		// check above re-runs, then wait for a writer's (or timer's) broadcast.
		if hasRDeadline {
			d.broadcastAfter(time.Until(d.rDeadline))
		}
		d.rwCond.Wait()
	}
	dataLen := d.pLens[0]
	if len(target) < dataLen {
		return 0, io.ErrShortBuffer
	}
	d.pLens = d.pLens[1:]
	d.buf.Read(target[:dataLen])
	// err will always be nil because we have already verified that buf.Len() != 0
	d.rwCond.Broadcast()
	return dataLen, nil
}
// Write queues one frame's payload as a single datagram, blocking while the
// buffered data exceeds recvBufferSizeLimit. A frame whose Closing field is
// non-zero closes the pipe instead of queueing data. toBeClosed reports
// whether the pipe is (now) closed; writing to an already-closed pipe returns
// io.ErrClosedPipe.
func (d *datagramBufferedPipe) Write(f *Frame) (toBeClosed bool, err error) {
	d.rwCond.L.Lock()
	defer d.rwCond.L.Unlock()
	for {
		if d.closed {
			return true, io.ErrClosedPipe
		}
		if d.buf.Len() <= recvBufferSizeLimit {
			// if d.buf gets too large, write() will panic. We don't want this to happen
			break
		}
		d.rwCond.Wait()
	}
	if f.Closing != closingNothing {
		// closing frame: mark closed and wake blocked readers/writers
		d.closed = true
		d.rwCond.Broadcast()
		return true, nil
	}
	dataLen := len(f.Payload)
	d.pLens = append(d.pLens, dataLen)
	d.buf.Write(f.Payload)
	// err will always be nil
	d.rwCond.Broadcast()
	return false, nil
}
// Close marks the pipe as closed and wakes all blocked readers and writers.
// Datagrams already queued stay readable; Read returns io.EOF once drained.
func (d *datagramBufferedPipe) Close() error {
	d.rwCond.L.Lock()
	d.closed = true
	d.rwCond.Broadcast()
	d.rwCond.L.Unlock()
	return nil
}
// SetReadDeadline sets the deadline for pending and future Read calls and
// broadcasts so any blocked reader re-evaluates it immediately.
func (d *datagramBufferedPipe) SetReadDeadline(t time.Time) {
	d.rwCond.L.Lock()
	d.rDeadline = t
	d.rwCond.Broadcast()
	d.rwCond.L.Unlock()
}
// broadcastAfter schedules a single rwCond.Broadcast after duration t,
// cancelling any previously scheduled one. Its caller in this file (Read)
// invokes it with rwCond.L held.
func (d *datagramBufferedPipe) broadcastAfter(t time.Duration) {
	if old := d.timeoutTimer; old != nil {
		old.Stop()
	}
	d.timeoutTimer = time.AfterFunc(t, d.rwCond.Broadcast)
}

View File

@ -1,62 +0,0 @@
package multiplex
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestDatagramBuffer_RW exercises write, read and closing-frame handling of a
// datagramBufferedPipe using testify assertions.
func TestDatagramBuffer_RW(t *testing.T) {
	payload := []byte{0x01, 0x02, 0x03}
	t.Run("simple write", func(t *testing.T) {
		pipe := NewDatagramBufferedPipe()
		_, err := pipe.Write(&Frame{Payload: payload})
		assert.NoError(t, err)
	})
	t.Run("simple read", func(t *testing.T) {
		pipe := NewDatagramBufferedPipe()
		_, _ = pipe.Write(&Frame{Payload: payload})
		readBuf := make([]byte, len(payload))
		n, err := pipe.Read(readBuf)
		assert.NoError(t, err)
		assert.Equal(t, len(payload), n)
		assert.Equal(t, payload, readBuf)
		assert.Equal(t, 0, pipe.buf.Len(), "buf len is not 0 after finished reading")
	})
	t.Run("writing closing frame", func(t *testing.T) {
		pipe := NewDatagramBufferedPipe()
		toBeClosed, err := pipe.Write(&Frame{Closing: closingStream})
		assert.NoError(t, err)
		assert.True(t, toBeClosed, "should be to be closed")
		assert.True(t, pipe.closed, "pipe should be closed")
	})
}
// TestDatagramBuffer_BlockingRead checks that Read blocks until another
// goroutine writes a datagram, then returns it intact.
func TestDatagramBuffer_BlockingRead(t *testing.T) {
	pipe := NewDatagramBufferedPipe()
	payload := []byte{0x01, 0x02, 0x03}
	go func() {
		time.Sleep(readBlockTime)
		pipe.Write(&Frame{Payload: payload})
	}()
	readBuf := make([]byte, len(payload))
	n, err := pipe.Read(readBuf)
	assert.NoError(t, err)
	assert.Equal(t, len(payload), n, "number of bytes read after block is wrong")
	assert.Equal(t, payload, readBuf)
}
// TestDatagramBuffer_CloseThenRead verifies that a datagram written before
// Close is still readable afterwards.
func TestDatagramBuffer_CloseThenRead(t *testing.T) {
	pipe := NewDatagramBufferedPipe()
	payload := []byte{0x01, 0x02, 0x03}
	pipe.Write(&Frame{Payload: payload})
	readBuf := make([]byte, len(payload))
	pipe.Close()
	n, err := pipe.Read(readBuf)
	assert.NoError(t, err)
	assert.Equal(t, len(payload), n, "number of bytes read after block is wrong")
	assert.Equal(t, payload, readBuf)
}

View File

@ -1,9 +1,9 @@
package multiplex package multiplex
const ( const (
closingNothing = iota C_NOOP = iota
closingStream C_STREAM
closingSession C_SESSION
) )
type Frame struct { type Frame struct {

View File

@ -2,15 +2,14 @@ package multiplex
import ( import (
"bytes" "bytes"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/connutil"
"io" "io"
"math/rand" "math/rand"
"net" "net"
"sync" "sync"
"testing" "testing"
"time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
) )
func serveEcho(l net.Listener) { func serveEcho(l net.Listener) {
@ -20,13 +19,13 @@ func serveEcho(l net.Listener) {
// TODO: pass the error back // TODO: pass the error back
return return
} }
go func(conn net.Conn) { go func() {
_, err := io.Copy(conn, conn) _, err := io.Copy(conn, conn)
if err != nil { if err != nil {
// TODO: pass the error back // TODO: pass the error back
return return
} }
}(conn) }()
} }
} }
@ -38,7 +37,7 @@ type connPair struct {
func makeSessionPair(numConn int) (*Session, *Session, []*connPair) { func makeSessionPair(numConn int) (*Session, *Session, []*connPair) {
sessionKey := [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} sessionKey := [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
sessionId := 1 sessionId := 1
obfuscator, _ := MakeObfuscator(EncryptionMethodChaha20Poly1305, sessionKey) obfuscator, _ := MakeObfuscator(E_METHOD_CHACHA20_POLY1305, sessionKey)
clientConfig := SessionConfig{ clientConfig := SessionConfig{
Obfuscator: obfuscator, Obfuscator: obfuscator,
Valve: nil, Valve: nil,
@ -52,8 +51,8 @@ func makeSessionPair(numConn int) (*Session, *Session, []*connPair) {
paris := make([]*connPair, numConn) paris := make([]*connPair, numConn)
for i := 0; i < numConn; i++ { for i := 0; i < numConn; i++ {
c, s := connutil.AsyncPipe() c, s := connutil.AsyncPipe()
clientConn := common.NewTLSConn(c) clientConn := &common.TLSConn{Conn: c}
serverConn := common.NewTLSConn(s) serverConn := &common.TLSConn{Conn: s}
paris[i] = &connPair{ paris[i] = &connPair{
clientConn: clientConn, clientConn: clientConn,
serverConn: serverConn, serverConn: serverConn,
@ -64,36 +63,31 @@ func makeSessionPair(numConn int) (*Session, *Session, []*connPair) {
return clientSession, serverSession, paris return clientSession, serverSession, paris
} }
func runEchoTest(t *testing.T, conns []net.Conn, msgLen int) { func runEchoTest(t *testing.T, streams []*Stream) {
const testDataLen = 16384
var wg sync.WaitGroup var wg sync.WaitGroup
for _, stream := range streams {
for _, conn := range conns {
wg.Add(1) wg.Add(1)
go func(conn net.Conn) { go func(stream *Stream) {
defer wg.Done() testData := make([]byte, testDataLen)
testData := make([]byte, msgLen)
rand.Read(testData) rand.Read(testData)
// we cannot call t.Fatalf in concurrent contexts n, err := stream.Write(testData)
n, err := conn.Write(testData) if n != testDataLen {
if n != msgLen { t.Fatalf("written only %v, err %v", n, err)
t.Errorf("written only %v, err %v", n, err)
return
} }
recvBuf := make([]byte, msgLen) recvBuf := make([]byte, testDataLen)
_, err = io.ReadFull(conn, recvBuf) _, err = io.ReadFull(stream, recvBuf)
if err != nil { if err != nil {
t.Errorf("failed to read back: %v", err) t.Fatalf("failed to read back: %v", err)
return
} }
if !bytes.Equal(testData, recvBuf) { if !bytes.Equal(testData, recvBuf) {
t.Errorf("echoed data not correct") t.Fatalf("echoed data not correct")
return
} }
}(conn) wg.Done()
}(stream)
} }
wg.Wait() wg.Wait()
} }
@ -101,32 +95,43 @@ func runEchoTest(t *testing.T, conns []net.Conn, msgLen int) {
func TestMultiplex(t *testing.T) { func TestMultiplex(t *testing.T) {
const numStreams = 2000 // -race option limits the number of goroutines to 8192 const numStreams = 2000 // -race option limits the number of goroutines to 8192
const numConns = 4 const numConns = 4
const msgLen = 16384
clientSession, serverSession, _ := makeSessionPair(numConns) clientSession, serverSession, _ := makeSessionPair(numConns)
go serveEcho(serverSession) go serveEcho(serverSession)
streams := make([]net.Conn, numStreams) streams := make([]*Stream, numStreams)
for i := 0; i < numStreams; i++ { for i := 0; i < numStreams; i++ {
stream, err := clientSession.OpenStream() stream, err := clientSession.OpenStream()
assert.NoError(t, err) if err != nil {
t.Fatalf("failed to open stream: %v", err)
}
streams[i] = stream streams[i] = stream
} }
//test echo //test echo
runEchoTest(t, streams, msgLen) runEchoTest(t, streams)
if clientSession.streamCount() != numStreams {
assert.EqualValues(t, numStreams, clientSession.streamCount(), "client stream count is wrong") t.Errorf("client stream count is wrong: %v", clientSession.streamCount())
assert.EqualValues(t, numStreams, serverSession.streamCount(), "server stream count is wrong") }
if serverSession.streamCount() != numStreams {
t.Errorf("server stream count is wrong: %v", serverSession.streamCount())
}
// close one stream // close one stream
closing, streams := streams[0], streams[1:] closing, streams := streams[0], streams[1:]
err := closing.Close() err := closing.Close()
assert.NoError(t, err, "couldn't close a stream") if err != nil {
t.Errorf("couldn't close a stream")
}
_, err = closing.Write([]byte{0}) _, err = closing.Write([]byte{0})
assert.Equal(t, ErrBrokenStream, err) if err != ErrBrokenStream {
t.Errorf("expecting error %v, got %v", ErrBrokenStream, err)
}
_, err = closing.Read(make([]byte, 1)) _, err = closing.Read(make([]byte, 1))
assert.Equal(t, ErrBrokenStream, err) if err != ErrBrokenStream {
t.Errorf("expecting error %v, got %v", ErrBrokenStream, err)
}
} }
func TestMux_StreamClosing(t *testing.T) { func TestMux_StreamClosing(t *testing.T) {
@ -138,13 +143,17 @@ func TestMux_StreamClosing(t *testing.T) {
recvBuf := make([]byte, 128) recvBuf := make([]byte, 128)
toBeClosed, _ := clientSession.OpenStream() toBeClosed, _ := clientSession.OpenStream()
_, err := toBeClosed.Write(testData) // should be echoed back _, err := toBeClosed.Write(testData) // should be echoed back
assert.NoError(t, err, "couldn't write to a stream") if err != nil {
t.Errorf("can't write to stream: %v", err)
_, err = io.ReadFull(toBeClosed, recvBuf[:1]) }
assert.NoError(t, err, "can't read anything before stream closed")
time.Sleep(500 * time.Millisecond)
_ = toBeClosed.Close() _ = toBeClosed.Close()
_, err = io.ReadFull(toBeClosed, recvBuf[1:]) _, err = io.ReadFull(toBeClosed, recvBuf)
assert.NoError(t, err, "can't read residual data on stream") if err != nil {
assert.Equal(t, testData, recvBuf, "incorrect data read back") t.Errorf("can't read residual data on stream: %v", err)
}
if !bytes.Equal(testData, recvBuf) {
t.Errorf("incorrect data read back")
}
} }

View File

@ -3,7 +3,6 @@ package multiplex
import ( import (
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/rand"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -12,190 +11,169 @@ import (
"golang.org/x/crypto/salsa20" "golang.org/x/crypto/salsa20"
) )
const frameHeaderLength = 14 type Obfser func(*Frame, []byte, int) (int, error)
const salsa20NonceSize = 8 type Deobfser func([]byte) (*Frame, error)
// maxExtraLen equals the max length of padding + AEAD tag. var u32 = binary.BigEndian.Uint32
// It is 255 bytes because the extra len field in frame header is only one byte. var u64 = binary.BigEndian.Uint64
const maxExtraLen = 1<<8 - 1 var putU32 = binary.BigEndian.PutUint32
var putU64 = binary.BigEndian.PutUint64
// padFirstNFrames specifies the number of initial frames to pad, const HEADER_LEN = 14
// to avoid TLS-in-TLS detection
const padFirstNFrames = 5
const ( const (
EncryptionMethodPlain = iota E_METHOD_PLAIN = iota
EncryptionMethodAES256GCM E_METHOD_AES_GCM
EncryptionMethodChaha20Poly1305 E_METHOD_CHACHA20_POLY1305
EncryptionMethodAES128GCM
) )
// Obfuscator is responsible for serialisation, obfuscation, and optional encryption of data frames. // Obfuscator is responsible for the obfuscation and deobfuscation of frames
type Obfuscator struct { type Obfuscator struct {
payloadCipher cipher.AEAD // Used in Stream.Write. Add multiplexing headers, encrypt and add TLS header
Obfs Obfser
sessionKey [32]byte // Remove TLS header, decrypt and unmarshall frames
Deobfs Deobfser
SessionKey [32]byte
minOverhead int
} }
// obfuscate adds multiplexing headers, encrypt and add TLS header func MakeObfs(salsaKey [32]byte, payloadCipher cipher.AEAD) Obfser {
func (o *Obfuscator) obfuscate(f *Frame, buf []byte, payloadOffsetInBuf int) (int, error) { obfs := func(f *Frame, buf []byte, payloadOffsetInBuf int) (int, error) {
// The method here is to use the first payloadCipher.NonceSize() bytes of the serialised frame header // we need the encrypted data to be at least 8 bytes to be used as nonce for salsa20 stream header encryption
// as iv/nonce for the AEAD cipher to encrypt the frame payload. Then we use // this will be the case if the encryption method is an AEAD cipher, however for plain, it's well possible
// the authentication tag produced appended to the end of the ciphertext (of size payloadCipher.Overhead()) // that the frame payload is smaller than 8 bytes, so we need to add on the difference
// as nonce for Salsa20 to encrypt the frame header. Both with sessionKey as keys. payloadLen := len(f.Payload)
// if payloadLen == 0 {
// Several cryptographic guarantees we have made here: that payloadCipher, as an AEAD, is given a unique return 0, errors.New("payload cannot be empty")
// iv/nonce each time, relative to its key; that the frame header encryptor Salsa20 is given a unique }
// nonce each time, relative to its key; and that the authenticity of frame header is checked. var extraLen int
// if payloadCipher == nil {
// The payloadCipher is given a unique iv/nonce each time because it is derived from the frame header, which if extraLen = 8 - payloadLen; extraLen < 0 {
// contains the monotonically increasing stream id (uint32) and frame sequence (uint64). There will be a nonce extraLen = 0
// reuse after 2^64-1 frames sent (sent, not received because frames going different ways are sequenced }
// independently) by a stream, or after 2^32-1 streams created in a single session. We consider these number
// to be large enough that they may never happen in reasonable time frames. Of course, different sessions
// will produce the same combination of stream id and frame sequence, but they will have different session keys.
//
//
// Because the frame header, before it being encrypted, is fed into the AEAD, it is also authenticated.
// (rfc5116 s.2.1 "The nonce is authenticated internally to the algorithm").
//
// In case the user chooses to not encrypt the frame payload, payloadCipher will be nil. In this scenario,
// we generate random bytes to be used as salsa20 nonce.
payloadLen := len(f.Payload)
if payloadLen == 0 {
return 0, errors.New("payload cannot be empty")
}
tagLen := 0
if o.payloadCipher != nil {
tagLen = o.payloadCipher.Overhead()
} else {
tagLen = salsa20NonceSize
}
// Pad to avoid size side channel leak
padLen := 0
if f.Seq < padFirstNFrames {
padLen = common.RandInt(maxExtraLen - tagLen + 1)
}
usefulLen := frameHeaderLength + payloadLen + padLen + tagLen
if len(buf) < usefulLen {
return 0, errors.New("obfs buffer too small")
}
// we do as much in-place as possible to save allocation
payload := buf[frameHeaderLength : frameHeaderLength+payloadLen+padLen]
if payloadOffsetInBuf != frameHeaderLength {
// if payload is not at the correct location in buffer
copy(payload, f.Payload)
}
header := buf[:frameHeaderLength]
binary.BigEndian.PutUint32(header[0:4], f.StreamID)
binary.BigEndian.PutUint64(header[4:12], f.Seq)
header[12] = f.Closing
header[13] = byte(padLen + tagLen)
// Random bytes for padding and nonce
_, err := rand.Read(buf[frameHeaderLength+payloadLen : usefulLen])
if err != nil {
return 0, fmt.Errorf("failed to pad random: %w", err)
}
if o.payloadCipher != nil {
o.payloadCipher.Seal(payload[:0], header[:o.payloadCipher.NonceSize()], payload, nil)
}
nonce := buf[usefulLen-salsa20NonceSize : usefulLen]
salsa20.XORKeyStream(header, header, nonce, &o.sessionKey)
return usefulLen, nil
}
// deobfuscate removes TLS header, decrypt and unmarshall frames
func (o *Obfuscator) deobfuscate(f *Frame, in []byte) error {
if len(in) < frameHeaderLength+salsa20NonceSize {
return fmt.Errorf("input size %v, but it cannot be shorter than %v bytes", len(in), frameHeaderLength+salsa20NonceSize)
}
header := in[:frameHeaderLength]
pldWithOverHead := in[frameHeaderLength:] // payload + potential overhead
nonce := in[len(in)-salsa20NonceSize:]
salsa20.XORKeyStream(header, header, nonce, &o.sessionKey)
streamID := binary.BigEndian.Uint32(header[0:4])
seq := binary.BigEndian.Uint64(header[4:12])
closing := header[12]
extraLen := header[13]
usefulPayloadLen := len(pldWithOverHead) - int(extraLen)
if usefulPayloadLen < 0 || usefulPayloadLen > len(pldWithOverHead) {
return errors.New("extra length is negative or extra length is greater than total pldWithOverHead length")
}
var outputPayload []byte
if o.payloadCipher == nil {
if extraLen == 0 {
outputPayload = pldWithOverHead
} else { } else {
extraLen = payloadCipher.Overhead()
if extraLen < 8 {
return 0, errors.New("AEAD's Overhead cannot be fewer than 8 bytes")
}
}
usefulLen := HEADER_LEN + payloadLen + extraLen
if len(buf) < usefulLen {
return 0, errors.New("obfs buffer too small")
}
// we do as much in-place as possible to save allocation
payload := buf[HEADER_LEN : HEADER_LEN+payloadLen]
if payloadOffsetInBuf != HEADER_LEN {
// if payload is not at the correct location in buffer
copy(payload, f.Payload)
}
header := buf[:HEADER_LEN]
putU32(header[0:4], f.StreamID)
putU64(header[4:12], f.Seq)
header[12] = f.Closing
header[13] = byte(extraLen)
if payloadCipher == nil {
if extraLen != 0 { // read nonce
extra := buf[usefulLen-extraLen : usefulLen]
common.CryptoRandRead(extra)
}
} else {
payloadCipher.Seal(payload[:0], header[:12], payload, nil)
}
nonce := buf[usefulLen-8 : usefulLen]
salsa20.XORKeyStream(header, header, nonce, &salsaKey)
return usefulLen, nil
}
return obfs
}
func MakeDeobfs(salsaKey [32]byte, payloadCipher cipher.AEAD) Deobfser {
// stream header length + minimum data size (i.e. nonce size of salsa20)
const minInputLen = HEADER_LEN + 8
deobfs := func(in []byte) (*Frame, error) {
if len(in) < minInputLen {
return nil, fmt.Errorf("input size %v, but it cannot be shorter than %v bytes", len(in), minInputLen)
}
header := in[:HEADER_LEN]
pldWithOverHead := in[HEADER_LEN:] // payload + potential overhead
nonce := in[len(in)-8:]
salsa20.XORKeyStream(header, header, nonce, &salsaKey)
streamID := u32(header[0:4])
seq := u64(header[4:12])
closing := header[12]
extraLen := header[13]
usefulPayloadLen := len(pldWithOverHead) - int(extraLen)
if usefulPayloadLen < 0 || usefulPayloadLen > len(pldWithOverHead) {
return nil, errors.New("extra length is negative or extra length is greater than total pldWithOverHead length")
}
var outputPayload []byte
if payloadCipher == nil {
if extraLen == 0 {
outputPayload = pldWithOverHead
} else {
outputPayload = pldWithOverHead[:usefulPayloadLen]
}
} else {
_, err := payloadCipher.Open(pldWithOverHead[:0], header[:12], pldWithOverHead, nil)
if err != nil {
return nil, err
}
outputPayload = pldWithOverHead[:usefulPayloadLen] outputPayload = pldWithOverHead[:usefulPayloadLen]
} }
} else {
_, err := o.payloadCipher.Open(pldWithOverHead[:0], header[:o.payloadCipher.NonceSize()], pldWithOverHead, nil)
if err != nil {
return err
}
outputPayload = pldWithOverHead[:usefulPayloadLen]
}
f.StreamID = streamID ret := &Frame{
f.Seq = seq StreamID: streamID,
f.Closing = closing Seq: seq,
f.Payload = outputPayload Closing: closing,
return nil Payload: outputPayload,
}
return ret, nil
}
return deobfs
} }
func MakeObfuscator(encryptionMethod byte, sessionKey [32]byte) (o Obfuscator, err error) { func MakeObfuscator(encryptionMethod byte, sessionKey [32]byte) (obfuscator Obfuscator, err error) {
o = Obfuscator{ obfuscator = Obfuscator{
sessionKey: sessionKey, SessionKey: sessionKey,
} }
var payloadCipher cipher.AEAD
switch encryptionMethod { switch encryptionMethod {
case EncryptionMethodPlain: case E_METHOD_PLAIN:
o.payloadCipher = nil payloadCipher = nil
case EncryptionMethodAES256GCM: obfuscator.minOverhead = 0
case E_METHOD_AES_GCM:
var c cipher.Block var c cipher.Block
c, err = aes.NewCipher(sessionKey[:]) c, err = aes.NewCipher(sessionKey[:])
if err != nil { if err != nil {
return return
} }
o.payloadCipher, err = cipher.NewGCM(c) payloadCipher, err = cipher.NewGCM(c)
if err != nil { if err != nil {
return return
} }
case EncryptionMethodAES128GCM: obfuscator.minOverhead = payloadCipher.Overhead()
var c cipher.Block case E_METHOD_CHACHA20_POLY1305:
c, err = aes.NewCipher(sessionKey[:16]) payloadCipher, err = chacha20poly1305.New(sessionKey[:])
if err != nil {
return
}
o.payloadCipher, err = cipher.NewGCM(c)
if err != nil {
return
}
case EncryptionMethodChaha20Poly1305:
o.payloadCipher, err = chacha20poly1305.New(sessionKey[:])
if err != nil { if err != nil {
return return
} }
obfuscator.minOverhead = payloadCipher.Overhead()
default: default:
return o, fmt.Errorf("unknown encryption method valued %v", encryptionMethod) return obfuscator, errors.New("Unknown encryption method")
}
if o.payloadCipher != nil {
if o.payloadCipher.NonceSize() > frameHeaderLength {
return o, errors.New("payload AEAD's nonce size cannot be greater than size of frame header")
}
} }
obfuscator.Obfs = MakeObfs(sessionKey, payloadCipher)
obfuscator.Deobfs = MakeDeobfs(sessionKey, payloadCipher)
return return
} }

View File

@ -1,128 +1,73 @@
package multiplex package multiplex
import ( import (
"bytes"
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"golang.org/x/crypto/chacha20poly1305"
"math/rand" "math/rand"
"reflect" "reflect"
"testing" "testing"
"testing/quick" "testing/quick"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/chacha20poly1305"
) )
func TestGenerateObfs(t *testing.T) { func TestGenerateObfs(t *testing.T) {
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
run := func(o Obfuscator, t *testing.T) { run := func(obfuscator Obfuscator, ct *testing.T) {
obfsBuf := make([]byte, 512) obfsBuf := make([]byte, 512)
_testFrame, _ := quick.Value(reflect.TypeOf(Frame{}), rand.New(rand.NewSource(42))) f := &Frame{}
testFrame := _testFrame.Interface().(Frame) _testFrame, _ := quick.Value(reflect.TypeOf(f), rand.New(rand.NewSource(42)))
i, err := o.obfuscate(&testFrame, obfsBuf, 0) testFrame := _testFrame.Interface().(*Frame)
assert.NoError(t, err) i, err := obfuscator.Obfs(testFrame, obfsBuf, 0)
var resultFrame Frame if err != nil {
ct.Error("failed to obfs ", err)
return
}
err = o.deobfuscate(&resultFrame, obfsBuf[:i]) resultFrame, err := obfuscator.Deobfs(obfsBuf[:i])
assert.NoError(t, err) if err != nil {
assert.EqualValues(t, testFrame, resultFrame) ct.Error("failed to deobfs ", err)
return
}
if !bytes.Equal(testFrame.Payload, resultFrame.Payload) || testFrame.StreamID != resultFrame.StreamID {
ct.Error("expecting", testFrame,
"got", resultFrame)
return
}
} }
t.Run("plain", func(t *testing.T) { t.Run("plain", func(t *testing.T) {
o, err := MakeObfuscator(EncryptionMethodPlain, sessionKey) obfuscator, err := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
assert.NoError(t, err) if err != nil {
run(o, t) t.Errorf("failed to generate obfuscator %v", err)
} else {
run(obfuscator, t)
}
}) })
t.Run("aes-256-gcm", func(t *testing.T) { t.Run("aes-gcm", func(t *testing.T) {
o, err := MakeObfuscator(EncryptionMethodAES256GCM, sessionKey) obfuscator, err := MakeObfuscator(E_METHOD_AES_GCM, sessionKey)
assert.NoError(t, err) if err != nil {
run(o, t) t.Errorf("failed to generate obfuscator %v", err)
}) } else {
t.Run("aes-128-gcm", func(t *testing.T) { run(obfuscator, t)
o, err := MakeObfuscator(EncryptionMethodAES128GCM, sessionKey) }
assert.NoError(t, err)
run(o, t)
}) })
t.Run("chacha20-poly1305", func(t *testing.T) { t.Run("chacha20-poly1305", func(t *testing.T) {
o, err := MakeObfuscator(EncryptionMethodChaha20Poly1305, sessionKey) obfuscator, err := MakeObfuscator(E_METHOD_CHACHA20_POLY1305, sessionKey)
assert.NoError(t, err) if err != nil {
run(o, t) t.Errorf("failed to generate obfuscator %v", err)
} else {
run(obfuscator, t)
}
}) })
t.Run("unknown encryption method", func(t *testing.T) { t.Run("unknown encryption method", func(t *testing.T) {
_, err := MakeObfuscator(0xff, sessionKey) _, err := MakeObfuscator(0xff, sessionKey)
assert.Error(t, err) if err == nil {
}) t.Errorf("unknown encryption mehtod error expected")
}
func TestObfuscate(t *testing.T) {
var sessionKey [32]byte
rand.Read(sessionKey[:])
const testPayloadLen = 1024
testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload)
f := Frame{
StreamID: 0,
Seq: 0,
Closing: 0,
Payload: testPayload,
}
runTest := func(t *testing.T, o Obfuscator) {
obfsBuf := make([]byte, testPayloadLen*2)
n, err := o.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
resultFrame := Frame{}
err = o.deobfuscate(&resultFrame, obfsBuf[:n])
assert.NoError(t, err)
assert.EqualValues(t, f, resultFrame)
}
t.Run("plain", func(t *testing.T) {
o := Obfuscator{
payloadCipher: nil,
sessionKey: sessionKey,
} }
runTest(t, o)
}) })
t.Run("aes-128-gcm", func(t *testing.T) {
c, err := aes.NewCipher(sessionKey[:16])
assert.NoError(t, err)
payloadCipher, err := cipher.NewGCM(c)
assert.NoError(t, err)
o := Obfuscator{
payloadCipher: payloadCipher,
sessionKey: sessionKey,
}
runTest(t, o)
})
t.Run("aes-256-gcm", func(t *testing.T) {
c, err := aes.NewCipher(sessionKey[:])
assert.NoError(t, err)
payloadCipher, err := cipher.NewGCM(c)
assert.NoError(t, err)
o := Obfuscator{
payloadCipher: payloadCipher,
sessionKey: sessionKey,
}
runTest(t, o)
})
t.Run("chacha20-poly1305", func(t *testing.T) {
payloadCipher, err := chacha20poly1305.New(sessionKey[:])
assert.NoError(t, err)
o := Obfuscator{
payloadCipher: payloadCipher,
sessionKey: sessionKey,
}
runTest(t, o)
})
} }
func BenchmarkObfs(b *testing.B) { func BenchmarkObfs(b *testing.B) {
@ -135,7 +80,7 @@ func BenchmarkObfs(b *testing.B) {
testPayload, testPayload,
} }
obfsBuf := make([]byte, len(testPayload)*2) obfsBuf := make([]byte, defaultSendRecvBufSize)
var key [32]byte var key [32]byte
rand.Read(key[:]) rand.Read(key[:])
@ -143,53 +88,40 @@ func BenchmarkObfs(b *testing.B) {
c, _ := aes.NewCipher(key[:]) c, _ := aes.NewCipher(key[:])
payloadCipher, _ := cipher.NewGCM(c) payloadCipher, _ := cipher.NewGCM(c)
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher,
sessionKey: key,
}
b.SetBytes(int64(len(testFrame.Payload))) b.SetBytes(int64(len(testFrame.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs(testFrame, obfsBuf, 0)
} }
}) })
b.Run("AES128GCM", func(b *testing.B) { b.Run("AES128GCM", func(b *testing.B) {
c, _ := aes.NewCipher(key[:16]) c, _ := aes.NewCipher(key[:16])
payloadCipher, _ := cipher.NewGCM(c) payloadCipher, _ := cipher.NewGCM(c)
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher,
sessionKey: key,
}
b.SetBytes(int64(len(testFrame.Payload))) b.SetBytes(int64(len(testFrame.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs(testFrame, obfsBuf, 0)
} }
}) })
b.Run("plain", func(b *testing.B) { b.Run("plain", func(b *testing.B) {
obfuscator := Obfuscator{ obfs := MakeObfs(key, nil)
payloadCipher: nil,
sessionKey: key,
}
b.SetBytes(int64(len(testFrame.Payload))) b.SetBytes(int64(len(testFrame.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs(testFrame, obfsBuf, 0)
} }
}) })
b.Run("chacha20Poly1305", func(b *testing.B) { b.Run("chacha20Poly1305", func(b *testing.B) {
payloadCipher, _ := chacha20poly1305.New(key[:]) payloadCipher, _ := chacha20poly1305.New(key[:16])
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher,
sessionKey: key,
}
b.SetBytes(int64(len(testFrame.Payload))) b.SetBytes(int64(len(testFrame.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs(testFrame, obfsBuf, 0)
} }
}) })
} }
@ -204,73 +136,60 @@ func BenchmarkDeobfs(b *testing.B) {
testPayload, testPayload,
} }
obfsBuf := make([]byte, len(testPayload)*2) obfsBuf := make([]byte, defaultSendRecvBufSize)
var key [32]byte var key [32]byte
rand.Read(key[:]) rand.Read(key[:])
b.Run("AES256GCM", func(b *testing.B) { b.Run("AES256GCM", func(b *testing.B) {
c, _ := aes.NewCipher(key[:]) c, _ := aes.NewCipher(key[:])
payloadCipher, _ := cipher.NewGCM(c) payloadCipher, _ := cipher.NewGCM(c)
obfuscator := Obfuscator{
payloadCipher: payloadCipher,
sessionKey: key,
}
n, _ := obfuscator.obfuscate(testFrame, obfsBuf, 0) obfs := MakeObfs(key, payloadCipher)
n, _ := obfs(testFrame, obfsBuf, 0)
deobfs := MakeDeobfs(key, payloadCipher)
frame := new(Frame)
b.SetBytes(int64(n)) b.SetBytes(int64(n))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.deobfuscate(frame, obfsBuf[:n]) deobfs(obfsBuf[:n])
} }
}) })
b.Run("AES128GCM", func(b *testing.B) { b.Run("AES128GCM", func(b *testing.B) {
c, _ := aes.NewCipher(key[:16]) c, _ := aes.NewCipher(key[:16])
payloadCipher, _ := cipher.NewGCM(c) payloadCipher, _ := cipher.NewGCM(c)
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher, n, _ := obfs(testFrame, obfsBuf, 0)
sessionKey: key, deobfs := MakeDeobfs(key, payloadCipher)
}
n, _ := obfuscator.obfuscate(testFrame, obfsBuf, 0)
frame := new(Frame)
b.ResetTimer() b.ResetTimer()
b.SetBytes(int64(n)) b.SetBytes(int64(n))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.deobfuscate(frame, obfsBuf[:n]) deobfs(obfsBuf[:n])
} }
}) })
b.Run("plain", func(b *testing.B) { b.Run("plain", func(b *testing.B) {
obfuscator := Obfuscator{ obfs := MakeObfs(key, nil)
payloadCipher: nil, n, _ := obfs(testFrame, obfsBuf, 0)
sessionKey: key, deobfs := MakeDeobfs(key, nil)
}
n, _ := obfuscator.obfuscate(testFrame, obfsBuf, 0)
frame := new(Frame)
b.ResetTimer() b.ResetTimer()
b.SetBytes(int64(n)) b.SetBytes(int64(n))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.deobfuscate(frame, obfsBuf[:n]) deobfs(obfsBuf[:n])
} }
}) })
b.Run("chacha20Poly1305", func(b *testing.B) { b.Run("chacha20Poly1305", func(b *testing.B) {
payloadCipher, _ := chacha20poly1305.New(key[:]) payloadCipher, _ := chacha20poly1305.New(key[:16])
obfuscator := Obfuscator{ obfs := MakeObfs(key, payloadCipher)
payloadCipher: payloadCipher, n, _ := obfs(testFrame, obfsBuf, 0)
sessionKey: key, deobfs := MakeDeobfs(key, payloadCipher)
}
n, _ := obfuscator.obfuscate(testFrame, obfsBuf, 0)
frame := new(Frame)
b.ResetTimer() b.ResetTimer()
b.SetBytes(int64(n)) b.SetBytes(int64(n))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
obfuscator.deobfuscate(frame, obfsBuf[:n]) deobfs(obfsBuf[:n])
} }
}) })
} }

View File

@ -1,24 +1,15 @@
package multiplex package multiplex
import ( import (
"errors"
"io" "io"
"time" "time"
) )
var ErrTimeout = errors.New("deadline exceeded")
type recvBuffer interface { type recvBuffer interface {
// Read calls' err must be nil | io.EOF | io.ErrShortBuffer // Read calls' err must be nil | io.EOF | io.ErrShortBuffer
// Read should NOT return error on a closed streamBuffer with a non-empty buffer.
// Instead, it should behave as if it hasn't been closed. Closure is only relevant
// when the buffer is empty.
io.ReadCloser io.ReadCloser
Write(*Frame) (toBeClosed bool, err error) io.WriterTo
Write(Frame) (toBeClosed bool, err error)
SetReadDeadline(time time.Time) SetReadDeadline(time time.Time)
SetWriteToTimeout(d time.Duration)
} }
// size we want the amount of unread data in buffer to grow before recvBuffer.Write blocks.
// If the buffer grows larger than what the system's memory can offer at the time of recvBuffer.Write,
// a panic will happen.
const recvBufferSizeLimit = 1<<31 - 1

View File

@ -3,48 +3,39 @@ package multiplex
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"net" "net"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
const ( const (
acceptBacklog = 1024 acceptBacklog = 1024
defaultInactivityTimeout = 30 * time.Second // TODO: will this be a signature?
defaultMaxOnWireSize = 1<<14 + 256 // https://tools.ietf.org/html/rfc8446#section-5.2 defaultSendRecvBufSize = 20480
) )
var ErrBrokenSession = errors.New("broken session") var ErrBrokenSession = errors.New("broken session")
var errRepeatSessionClosing = errors.New("trying to close a closed session") var errRepeatSessionClosing = errors.New("trying to close a closed session")
var errRepeatStreamClosing = errors.New("trying to close a closed stream") var errRepeatStreamClosing = errors.New("trying to close a closed stream")
var errNoMultiplex = errors.New("a singleplexing session can have only one stream")
type switchboardStrategy int
type SessionConfig struct { type SessionConfig struct {
Obfuscator Obfuscator
// Valve is used to limit transmission rates, and record and limit usage
Valve Valve
Unordered bool Unordered bool
// A Singleplexing session always has just one stream MaxFrameSize int // maximum size of the frame, including the header
Singleplex bool SendBufferSize int
ReceiveBufferSize int
// maximum size of an obfuscated frame, including headers and overhead
MsgOnWireSizeLimit int
// InactivityTimeout sets the duration a Session waits while it has no active streams before it closes itself
InactivityTimeout time.Duration
} }
// A Session represents a self-contained communication chain between local and remote. It manages its streams,
// controls serialisation and encryption of data sent and received using the supplied Obfuscator, and send and receive
// data through a manged connection pool filled with underlying connections added to it.
type Session struct { type Session struct {
id uint32 id uint32
@ -55,16 +46,7 @@ type Session struct {
// atomic // atomic
activeStreamCount uint32 activeStreamCount uint32
streams sync.Map
streamsM sync.Mutex
streams map[uint32]*Stream
// For accepting new streams
acceptCh chan *Stream
// a pool of heap allocated frame objects so we don't have to allocate a new one each time we receive a frame
recvFramePool sync.Pool
streamObfsBufPool sync.Pool
// Switchboard manages all connections to remote // Switchboard manages all connections to remote
sb *switchboard sb *switchboard
@ -72,19 +54,14 @@ type Session struct {
// Used for LocalAddr() and RemoteAddr() etc. // Used for LocalAddr() and RemoteAddr() etc.
addrs atomic.Value addrs atomic.Value
// For accepting new streams
acceptCh chan *Stream
closed uint32 closed uint32
terminalMsgSetter sync.Once terminalMsg atomic.Value
terminalMsg string
// the max size passed to Write calls before it splits it into multiple frames maxStreamUnitWrite int // the max size passed to Write calls before it splits it into multiple frames
// i.e. the max size a piece of data can fit into a Frame.Payload
maxStreamUnitWrite int
// streamSendBufferSize sets the buffer size used to send data from a Stream (Stream.obfsBuf)
streamSendBufferSize int
// connReceiveBufferSize sets the buffer size used to receive data from an underlying Conn (allocated in
// switchboard.deplex)
connReceiveBufferSize int
} }
func MakeSession(id uint32, config SessionConfig) *Session { func MakeSession(id uint32, config SessionConfig) *Session {
@ -93,39 +70,39 @@ func MakeSession(id uint32, config SessionConfig) *Session {
SessionConfig: config, SessionConfig: config,
nextStreamID: 1, nextStreamID: 1,
acceptCh: make(chan *Stream, acceptBacklog), acceptCh: make(chan *Stream, acceptBacklog),
recvFramePool: sync.Pool{New: func() interface{} { return &Frame{} }},
streams: map[uint32]*Stream{},
} }
sesh.addrs.Store([]net.Addr{nil, nil}) sesh.addrs.Store([]net.Addr{nil, nil})
if config.Valve == nil { if config.Valve == nil {
sesh.Valve = UNLIMITED_VALVE sesh.Valve = UNLIMITED_VALVE
} }
if config.MsgOnWireSizeLimit <= 0 { if config.SendBufferSize <= 0 {
sesh.MsgOnWireSizeLimit = defaultMaxOnWireSize sesh.SendBufferSize = defaultSendRecvBufSize
} }
if config.InactivityTimeout == 0 { if config.ReceiveBufferSize <= 0 {
sesh.InactivityTimeout = defaultInactivityTimeout sesh.ReceiveBufferSize = defaultSendRecvBufSize
} }
if config.MaxFrameSize <= 0 {
sesh.MaxFrameSize = defaultSendRecvBufSize - 1024
}
// todo: validation. this must be smaller than the buffer sizes
sesh.maxStreamUnitWrite = sesh.MaxFrameSize - HEADER_LEN - sesh.Obfuscator.minOverhead
sesh.maxStreamUnitWrite = sesh.MsgOnWireSizeLimit - frameHeaderLength - maxExtraLen sbConfig := switchboardConfig{
sesh.streamSendBufferSize = sesh.MsgOnWireSizeLimit valve: sesh.Valve,
sesh.connReceiveBufferSize = 20480 // for backwards compatibility recvBufferSize: sesh.ReceiveBufferSize,
}
sesh.streamObfsBufPool = sync.Pool{New: func() interface{} { if sesh.Unordered {
b := make([]byte, sesh.streamSendBufferSize) log.Debug("Connection is unordered")
return &b sbConfig.strategy = UNIFORM_SPREAD
}} } else {
sbConfig.strategy = FIXED_CONN_MAPPING
sesh.sb = makeSwitchboard(sesh) }
time.AfterFunc(sesh.InactivityTimeout, sesh.checkTimeout) sesh.sb = makeSwitchboard(sesh, sbConfig)
go sesh.timeoutAfter(30 * time.Second)
return sesh return sesh
} }
func (sesh *Session) GetSessionKey() [32]byte {
return sesh.sessionKey
}
func (sesh *Session) streamCountIncr() uint32 { func (sesh *Session) streamCountIncr() uint32 {
return atomic.AddUint32(&sesh.activeStreamCount, 1) return atomic.AddUint32(&sesh.activeStreamCount, 1)
} }
@ -136,35 +113,25 @@ func (sesh *Session) streamCount() uint32 {
return atomic.LoadUint32(&sesh.activeStreamCount) return atomic.LoadUint32(&sesh.activeStreamCount)
} }
// AddConnection is used to add an underlying connection to the connection pool
func (sesh *Session) AddConnection(conn net.Conn) { func (sesh *Session) AddConnection(conn net.Conn) {
sesh.sb.addConn(conn) sesh.sb.addConn(conn)
addrs := []net.Addr{conn.LocalAddr(), conn.RemoteAddr()} addrs := []net.Addr{conn.LocalAddr(), conn.RemoteAddr()}
sesh.addrs.Store(addrs) sesh.addrs.Store(addrs)
} }
// OpenStream is similar to net.Dial. It opens up a new stream
func (sesh *Session) OpenStream() (*Stream, error) { func (sesh *Session) OpenStream() (*Stream, error) {
if sesh.IsClosed() { if sesh.IsClosed() {
return nil, ErrBrokenSession return nil, ErrBrokenSession
} }
id := atomic.AddUint32(&sesh.nextStreamID, 1) - 1 id := atomic.AddUint32(&sesh.nextStreamID, 1) - 1
// Because atomic.AddUint32 returns the value after incrementation // Because atomic.AddUint32 returns the value after incrementation
if sesh.Singleplex && id > 1 {
// if there are more than one streams, which shouldn't happen if we are
// singleplexing
return nil, errNoMultiplex
}
stream := makeStream(sesh, id) stream := makeStream(sesh, id)
sesh.streamsM.Lock() sesh.streams.Store(id, stream)
sesh.streams[id] = stream
sesh.streamsM.Unlock()
sesh.streamCountIncr() sesh.streamCountIncr()
log.Tracef("stream %v of session %v opened", id, sesh.id) log.Tracef("stream %v of session %v opened", id, sesh.id)
return stream, nil return stream, nil
} }
// Accept is similar to net.Listener's Accept(). It blocks and returns an incoming stream
func (sesh *Session) Accept() (net.Conn, error) { func (sesh *Session) Accept() (net.Conn, error) {
if sesh.IsClosed() { if sesh.IsClosed() {
return nil, ErrBrokenSession return nil, ErrBrokenSession
@ -178,47 +145,40 @@ func (sesh *Session) Accept() (net.Conn, error) {
} }
func (sesh *Session) closeStream(s *Stream, active bool) error { func (sesh *Session) closeStream(s *Stream, active bool) error {
if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) { if atomic.SwapUint32(&s.closed, 1) == 1 {
return fmt.Errorf("closing stream %v: %w", s.id, errRepeatStreamClosing) return fmt.Errorf("closing stream %v: %w", s.id, errRepeatStreamClosing)
} }
_ = s.recvBuf.Close() // recvBuf.Close should not return error _ = s.recvBuf.Close() // both datagramBuffer and streamBuffer won't return err on Close()
if active { if active {
tmpBuf := sesh.streamObfsBufPool.Get().(*[]byte)
// Notify remote that this stream is closed // Notify remote that this stream is closed
common.CryptoRandRead((*tmpBuf)[:1]) padding := genRandomPadding()
padLen := int((*tmpBuf)[0]) + 1 f := &Frame{
payload := (*tmpBuf)[frameHeaderLength : padLen+frameHeaderLength] StreamID: s.id,
common.CryptoRandRead(payload) Seq: s.nextSendSeq,
Closing: C_STREAM,
Payload: padding,
}
s.nextSendSeq++
// must be holding s.wirtingM on entry obfsBuf := make([]byte, len(padding)+64)
s.writingFrame.Closing = closingStream i, err := sesh.Obfs(f, obfsBuf, 0)
s.writingFrame.Payload = payload
err := s.obfuscateAndSend(*tmpBuf, frameHeaderLength)
sesh.streamObfsBufPool.Put(tmpBuf)
if err != nil { if err != nil {
return err return err
} }
log.Tracef("stream %v actively closed.", s.id) _, err = sesh.sb.send(obfsBuf[:i], &s.assignedConnId)
if err != nil {
return err
}
log.Tracef("stream %v actively closed. seq %v", s.id, f.Seq)
} else { } else {
log.Tracef("stream %v passively closed", s.id) log.Tracef("stream %v passively closed", s.id)
} }
// We set it as nil to signify that the stream id had existed before. sesh.streams.Store(s.id, nil) // id may or may not exist. if we use Delete(s.id) here it will panic
// If we Delete(s.id) straight away, later on in recvDataFromRemote, it will not be able to tell
// if the frame it received was from a new stream or a dying stream whose frame arrived late
sesh.streamsM.Lock()
sesh.streams[s.id] = nil
sesh.streamsM.Unlock()
if sesh.streamCountDecr() == 0 { if sesh.streamCountDecr() == 0 {
if sesh.Singleplex { log.Debugf("session %v has no active stream left", sesh.id)
return sesh.Close() go sesh.timeoutAfter(30 * time.Second)
} else {
log.Debugf("session %v has no active stream left", sesh.id)
time.AfterFunc(sesh.InactivityTimeout, sesh.checkTimeout)
}
} }
return nil return nil
} }
@ -227,112 +187,115 @@ func (sesh *Session) closeStream(s *Stream, active bool) error {
// to the stream buffer, otherwise it fetches the desired stream instance, or creates and stores one if it's a new // to the stream buffer, otherwise it fetches the desired stream instance, or creates and stores one if it's a new
// stream and then writes to the stream buffer // stream and then writes to the stream buffer
func (sesh *Session) recvDataFromRemote(data []byte) error { func (sesh *Session) recvDataFromRemote(data []byte) error {
frame := sesh.recvFramePool.Get().(*Frame) frame, err := sesh.Deobfs(data)
defer sesh.recvFramePool.Put(frame)
err := sesh.deobfuscate(frame, data)
if err != nil { if err != nil {
return fmt.Errorf("Failed to decrypt a frame for session %v: %v", sesh.id, err) return fmt.Errorf("Failed to decrypt a frame for session %v: %v", sesh.id, err)
} }
if frame.Closing == closingSession { if frame.Closing == C_SESSION {
sesh.SetTerminalMsg("Received a closing notification frame") sesh.SetTerminalMsg("Received a closing notification frame")
return sesh.passiveClose() return sesh.passiveClose()
} }
sesh.streamsM.Lock() newStream := makeStream(sesh, frame.StreamID)
if sesh.IsClosed() { existingStreamI, existing := sesh.streams.LoadOrStore(frame.StreamID, newStream)
sesh.streamsM.Unlock()
return ErrBrokenSession
}
existingStream, existing := sesh.streams[frame.StreamID]
if existing { if existing {
sesh.streamsM.Unlock() if existingStreamI == nil {
if existingStream == nil {
// this is when the stream existed before but has since been closed. We do nothing // this is when the stream existed before but has since been closed. We do nothing
return nil return nil
} }
return existingStream.recvFrame(frame) return existingStreamI.(*Stream).writeFrame(*frame)
} else { } else {
newStream := makeStream(sesh, frame.StreamID)
sesh.streams[frame.StreamID] = newStream
sesh.acceptCh <- newStream
sesh.streamsM.Unlock()
// new stream // new stream
sesh.streamCountIncr() sesh.streamCountIncr()
return newStream.recvFrame(frame) sesh.acceptCh <- newStream
return newStream.writeFrame(*frame)
} }
} }
func (sesh *Session) SetTerminalMsg(msg string) { func (sesh *Session) SetTerminalMsg(msg string) {
log.Debug("terminal message set to " + msg) sesh.terminalMsg.Store(msg)
sesh.terminalMsgSetter.Do(func() {
sesh.terminalMsg = msg
})
} }
func (sesh *Session) TerminalMsg() string { func (sesh *Session) TerminalMsg() string {
return sesh.terminalMsg msg := sesh.terminalMsg.Load()
} if msg != nil {
return msg.(string)
func (sesh *Session) closeSession() error { } else {
if !atomic.CompareAndSwapUint32(&sesh.closed, 0, 1) { return ""
log.Debugf("session %v has already been closed", sesh.id)
return errRepeatSessionClosing
} }
sesh.streamsM.Lock()
close(sesh.acceptCh)
for id, stream := range sesh.streams {
if stream != nil && atomic.CompareAndSwapUint32(&stream.closed, 0, 1) {
_ = stream.recvBuf.Close() // will not block
delete(sesh.streams, id)
sesh.streamCountDecr()
}
}
sesh.streamsM.Unlock()
return nil
} }
func (sesh *Session) passiveClose() error { func (sesh *Session) passiveClose() error {
log.Debugf("attempting to passively close session %v", sesh.id) log.Debugf("attempting to passively close session %v", sesh.id)
err := sesh.closeSession() if atomic.SwapUint32(&sesh.closed, 1) == 1 {
if err != nil { log.Debugf("session %v has already been closed", sesh.id)
return err return errRepeatSessionClosing
} }
sesh.acceptCh <- nil
sesh.streams.Range(func(key, streamI interface{}) bool {
if streamI == nil {
return true
}
stream := streamI.(*Stream)
atomic.StoreUint32(&stream.closed, 1)
_ = stream.recvBuf.Close() // will not block
sesh.streams.Delete(key)
sesh.streamCountDecr()
return true
})
sesh.sb.closeAll() sesh.sb.closeAll()
log.Debugf("session %v closed gracefully", sesh.id) log.Debugf("session %v closed gracefully", sesh.id)
return nil return nil
} }
func genRandomPadding() []byte {
lenB := make([]byte, 1)
common.CryptoRandRead(lenB)
pad := make([]byte, lenB[0])
common.CryptoRandRead(pad)
return pad
}
func (sesh *Session) Close() error { func (sesh *Session) Close() error {
log.Debugf("attempting to actively close session %v", sesh.id) log.Debugf("attempting to actively close session %v", sesh.id)
err := sesh.closeSession() if atomic.SwapUint32(&sesh.closed, 1) == 1 {
if err != nil { log.Debugf("session %v has already been closed", sesh.id)
return err return errRepeatSessionClosing
} }
// we send a notice frame telling remote to close the session sesh.acceptCh <- nil
buf := sesh.streamObfsBufPool.Get().(*[]byte) sesh.streams.Range(func(key, streamI interface{}) bool {
common.CryptoRandRead((*buf)[:1]) if streamI == nil {
padLen := int((*buf)[0]) + 1 return true
payload := (*buf)[frameHeaderLength : padLen+frameHeaderLength] }
common.CryptoRandRead(payload) stream := streamI.(*Stream)
atomic.StoreUint32(&stream.closed, 1)
_ = stream.recvBuf.Close() // will not block
sesh.streams.Delete(key)
sesh.streamCountDecr()
return true
})
pad := genRandomPadding()
f := &Frame{ f := &Frame{
StreamID: 0xffffffff, StreamID: 0xffffffff,
Seq: 0, Seq: 0,
Closing: closingSession, Closing: C_SESSION,
Payload: payload, Payload: pad,
} }
i, err := sesh.obfuscate(f, *buf, frameHeaderLength) obfsBuf := make([]byte, len(pad)+64)
i, err := sesh.Obfs(f, obfsBuf, 0)
if err != nil { if err != nil {
return err return err
} }
_, err = sesh.sb.send((*buf)[:i], new(net.Conn)) _, err = sesh.sb.send(obfsBuf[:i], new(uint32))
if err != nil { if err != nil {
return err return err
} }
sesh.sb.closeAll() sesh.sb.closeAll()
log.Debugf("session %v closed gracefully", sesh.id) log.Debugf("session %v closed gracefully", sesh.id)
return nil return nil
@ -342,7 +305,9 @@ func (sesh *Session) IsClosed() bool {
return atomic.LoadUint32(&sesh.closed) == 1 return atomic.LoadUint32(&sesh.closed) == 1
} }
func (sesh *Session) checkTimeout() { func (sesh *Session) timeoutAfter(to time.Duration) {
time.Sleep(to)
if sesh.streamCount() == 0 && !sesh.IsClosed() { if sesh.streamCount() == 0 && !sesh.IsClosed() {
sesh.SetTerminalMsg("timeout") sesh.SetTerminalMsg("timeout")
sesh.Close() sesh.Close()

View File

@ -1,24 +0,0 @@
//go:build gofuzz
// +build gofuzz
package multiplex
func setupSesh_fuzz(unordered bool) *Session {
obfuscator, _ := MakeObfuscator(EncryptionMethodPlain, [32]byte{})
seshConfig := SessionConfig{
Obfuscator: obfuscator,
Valve: nil,
Unordered: unordered,
}
return MakeSession(0, seshConfig)
}
func Fuzz(data []byte) int {
sesh := setupSesh_fuzz(false)
err := sesh.recvDataFromRemote(data)
if err == nil {
return 1
}
return 0
}

View File

@ -2,237 +2,173 @@ package multiplex
import ( import (
"bytes" "bytes"
"io" "github.com/cbeuw/connutil"
"io/ioutil"
"math/rand" "math/rand"
"net"
"strconv" "strconv"
"sync" "sync"
"sync/atomic" "sync/atomic"
"testing" "testing"
"time" "time"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
) )
var seshConfigs = map[string]SessionConfig{ var seshConfigOrdered = SessionConfig{}
"ordered": {},
"unordered": {Unordered: true},
}
var encryptionMethods = map[string]byte{
"plain": EncryptionMethodPlain,
"aes-256-gcm": EncryptionMethodAES256GCM,
"aes-128-gcm": EncryptionMethodAES128GCM,
"chacha20poly1305": EncryptionMethodChaha20Poly1305,
}
const testPayloadLen = 1024 var seshConfigUnordered = SessionConfig{
const obfsBufLen = testPayloadLen * 2 Unordered: true,
}
func TestRecvDataFromRemote(t *testing.T) { func TestRecvDataFromRemote(t *testing.T) {
testPayloadLen := 1024
testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload)
f := &Frame{
1,
0,
0,
testPayload,
}
obfsBuf := make([]byte, 17000)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
t.Run("plain ordered", func(t *testing.T) {
obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfigOrdered)
n, _ := sesh.Obfs(f, obfsBuf, 0)
for seshType, seshConfig := range seshConfigs { err := sesh.recvDataFromRemote(obfsBuf[:n])
seshConfig := seshConfig if err != nil {
t.Run(seshType, func(t *testing.T) { t.Error(err)
var err error return
seshConfig.Obfuscator, err = MakeObfuscator(EncryptionMethodPlain, sessionKey) }
if err != nil { stream, err := sesh.Accept()
t.Fatalf("failed to make obfuscator: %v", err) if err != nil {
} t.Error(err)
t.Run("initial frame", func(t *testing.T) { return
sesh := MakeSession(0, seshConfig) }
obfsBuf := make([]byte, obfsBufLen)
f := Frame{
1,
0,
0,
make([]byte, testPayloadLen),
}
rand.Read(f.Payload)
n, err := sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
stream, err := sesh.Accept()
assert.NoError(t, err)
resultPayload := make([]byte, testPayloadLen) resultPayload := make([]byte, testPayloadLen)
_, err = stream.Read(resultPayload) _, err = stream.Read(resultPayload)
assert.NoError(t, err) if err != nil {
t.Error(err)
return
}
if !bytes.Equal(testPayload, resultPayload) {
t.Errorf("Expecting %x, got %x", testPayload, resultPayload)
}
})
t.Run("aes-gcm ordered", func(t *testing.T) {
obfuscator, _ := MakeObfuscator(E_METHOD_AES_GCM, sessionKey)
seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfigOrdered)
n, _ := sesh.Obfs(f, obfsBuf, 0)
assert.EqualValues(t, f.Payload, resultPayload) err := sesh.recvDataFromRemote(obfsBuf[:n])
}) if err != nil {
t.Error(err)
return
}
stream, err := sesh.Accept()
if err != nil {
t.Error(err)
return
}
t.Run("two frames in order", func(t *testing.T) { resultPayload := make([]byte, testPayloadLen)
sesh := MakeSession(0, seshConfig) _, err = stream.Read(resultPayload)
obfsBuf := make([]byte, obfsBufLen) if err != nil {
f := Frame{ t.Error(err)
1, return
0, }
0, if !bytes.Equal(testPayload, resultPayload) {
make([]byte, testPayloadLen), t.Errorf("Expecting %x, got %x", testPayload, resultPayload)
} }
rand.Read(f.Payload) })
n, err := sesh.obfuscate(&f, obfsBuf, 0) t.Run("chacha20-poly1305 ordered", func(t *testing.T) {
assert.NoError(t, err) obfuscator, _ := MakeObfuscator(E_METHOD_CHACHA20_POLY1305, sessionKey)
err = sesh.recvDataFromRemote(obfsBuf[:n]) seshConfigOrdered.Obfuscator = obfuscator
assert.NoError(t, err) sesh := MakeSession(0, seshConfigOrdered)
stream, err := sesh.Accept() n, _ := sesh.Obfs(f, obfsBuf, 0)
assert.NoError(t, err)
resultPayload := make([]byte, testPayloadLen) err := sesh.recvDataFromRemote(obfsBuf[:n])
_, err = io.ReadFull(stream, resultPayload) if err != nil {
assert.NoError(t, err) t.Error(err)
return
}
stream, err := sesh.Accept()
if err != nil {
t.Error(err)
return
}
assert.EqualValues(t, f.Payload, resultPayload) resultPayload := make([]byte, testPayloadLen)
_, err = stream.Read(resultPayload)
if err != nil {
t.Error(err)
return
}
if !bytes.Equal(testPayload, resultPayload) {
t.Errorf("Expecting %x, got %x", testPayload, resultPayload)
}
})
f.Seq += 1 t.Run("plain unordered", func(t *testing.T) {
rand.Read(f.Payload) obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
n, err = sesh.obfuscate(&f, obfsBuf, 0) seshConfigUnordered.Obfuscator = obfuscator
assert.NoError(t, err) sesh := MakeSession(0, seshConfigOrdered)
err = sesh.recvDataFromRemote(obfsBuf[:n]) n, _ := sesh.Obfs(f, obfsBuf, 0)
assert.NoError(t, err)
_, err = io.ReadFull(stream, resultPayload) err := sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err) if err != nil {
t.Error(err)
return
}
stream, err := sesh.Accept()
if err != nil {
t.Error(err)
return
}
assert.EqualValues(t, f.Payload, resultPayload) resultPayload := make([]byte, testPayloadLen)
}) _, err = stream.Read(resultPayload)
if err != nil {
t.Run("two frames in order", func(t *testing.T) { t.Error(err)
sesh := MakeSession(0, seshConfig) return
obfsBuf := make([]byte, obfsBufLen) }
f := Frame{ if !bytes.Equal(testPayload, resultPayload) {
1, t.Errorf("Expecting %x, got %x", testPayload, resultPayload)
0, }
0, })
make([]byte, testPayloadLen),
}
rand.Read(f.Payload)
n, err := sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
stream, err := sesh.Accept()
assert.NoError(t, err)
resultPayload := make([]byte, testPayloadLen)
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, f.Payload, resultPayload)
f.Seq += 1
rand.Read(f.Payload)
n, err = sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, f.Payload, resultPayload)
})
if seshType == "ordered" {
t.Run("frames out of order", func(t *testing.T) {
sesh := MakeSession(0, seshConfig)
obfsBuf := make([]byte, obfsBufLen)
f := Frame{
1,
0,
0,
nil,
}
// First frame
seq0 := make([]byte, testPayloadLen)
rand.Read(seq0)
f.Seq = 0
f.Payload = seq0
n, err := sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
// Third frame
seq2 := make([]byte, testPayloadLen)
rand.Read(seq2)
f.Seq = 2
f.Payload = seq2
n, err = sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
// Second frame
seq1 := make([]byte, testPayloadLen)
rand.Read(seq1)
f.Seq = 1
f.Payload = seq1
n, err = sesh.obfuscate(&f, obfsBuf, 0)
assert.NoError(t, err)
err = sesh.recvDataFromRemote(obfsBuf[:n])
assert.NoError(t, err)
// Expect things to receive in order
stream, err := sesh.Accept()
assert.NoError(t, err)
resultPayload := make([]byte, testPayloadLen)
// First
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, seq0, resultPayload)
// Second
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, seq1, resultPayload)
// Third
_, err = io.ReadFull(stream, resultPayload)
assert.NoError(t, err)
assert.EqualValues(t, seq2, resultPayload)
})
}
})
}
} }
func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) { func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
testPayloadLen := 1024
testPayload := make([]byte, testPayloadLen) testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload) rand.Read(testPayload)
obfsBuf := make([]byte, obfsBufLen) obfsBuf := make([]byte, 17000)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
seshConfig := seshConfigs["ordered"] obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfig.Obfuscator, _ = MakeObfuscator(EncryptionMethodPlain, sessionKey) seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfig)
sesh := MakeSession(0, seshConfigOrdered)
f1 := &Frame{ f1 := &Frame{
1, 1,
0, 0,
closingNothing, C_NOOP,
testPayload, testPayload,
} }
// create stream 1 // create stream 1
n, _ := sesh.obfuscate(f1, obfsBuf, 0) n, _ := sesh.Obfs(f1, obfsBuf, 0)
err := sesh.recvDataFromRemote(obfsBuf[:n]) err := sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving normal frame for stream 1: %v", err) t.Fatalf("receiving normal frame for stream 1: %v", err)
} }
sesh.streamsM.Lock() _, ok := sesh.streams.Load(f1.StreamID)
_, ok := sesh.streams[f1.StreamID]
sesh.streamsM.Unlock()
if !ok { if !ok {
t.Fatal("failed to fetch stream 1 after receiving it") t.Fatal("failed to fetch stream 1 after receiving it")
} }
@ -244,18 +180,16 @@ func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
f2 := &Frame{ f2 := &Frame{
2, 2,
0, 0,
closingNothing, C_NOOP,
testPayload, testPayload,
} }
n, _ = sesh.obfuscate(f2, obfsBuf, 0) n, _ = sesh.Obfs(f2, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving normal frame for stream 2: %v", err) t.Fatalf("receiving normal frame for stream 2: %v", err)
} }
sesh.streamsM.Lock() s2I, ok := sesh.streams.Load(f2.StreamID)
s2M, ok := sesh.streams[f2.StreamID] if s2I == nil || !ok {
sesh.streamsM.Unlock()
if s2M == nil || !ok {
t.Fatal("failed to fetch stream 2 after receiving it") t.Fatal("failed to fetch stream 2 after receiving it")
} }
if sesh.streamCount() != 2 { if sesh.streamCount() != 2 {
@ -266,18 +200,16 @@ func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
f1CloseStream := &Frame{ f1CloseStream := &Frame{
1, 1,
1, 1,
closingStream, C_STREAM,
testPayload, testPayload,
} }
n, _ = sesh.obfuscate(f1CloseStream, obfsBuf, 0) n, _ = sesh.Obfs(f1CloseStream, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving stream closing frame for stream 1: %v", err) t.Fatalf("receiving stream closing frame for stream 1: %v", err)
} }
sesh.streamsM.Lock() s1I, _ := sesh.streams.Load(f1.StreamID)
s1M, _ := sesh.streams[f1.StreamID] if s1I != nil {
sesh.streamsM.Unlock()
if s1M != nil {
t.Fatal("stream 1 still exist after receiving stream close") t.Fatal("stream 1 still exist after receiving stream close")
} }
s1, _ := sesh.Accept() s1, _ := sesh.Accept()
@ -298,30 +230,27 @@ func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
} }
// close stream 1 again // close stream 1 again
n, _ = sesh.obfuscate(f1CloseStream, obfsBuf, 0) n, _ = sesh.Obfs(f1CloseStream, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving stream closing frame for stream 1 %v", err) t.Fatalf("receiving stream closing frame for stream 1 %v", err)
} }
sesh.streamsM.Lock() s1I, _ = sesh.streams.Load(f1.StreamID)
s1M, _ = sesh.streams[f1.StreamID] if s1I != nil {
sesh.streamsM.Unlock()
if s1M != nil {
t.Error("stream 1 exists after receiving stream close for the second time") t.Error("stream 1 exists after receiving stream close for the second time")
} }
streamCount := sesh.streamCount() if sesh.streamCount() != 1 {
if streamCount != 1 { t.Error("stream count isn't 1 after stream 1 closed twice")
t.Errorf("stream count is %v after stream 1 closed twice, expected 1", streamCount)
} }
// close session // close session
fCloseSession := &Frame{ fCloseSession := &Frame{
StreamID: 0xffffffff, StreamID: 0xffffffff,
Seq: 0, Seq: 0,
Closing: closingSession, Closing: C_SESSION,
Payload: testPayload, Payload: testPayload,
} }
n, _ = sesh.obfuscate(fCloseSession, obfsBuf, 0) n, _ = sesh.Obfs(fCloseSession, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving session closing frame: %v", err) t.Fatalf("receiving session closing frame: %v", err)
@ -345,32 +274,32 @@ func TestRecvDataFromRemote_Closing_InOrder(t *testing.T) {
func TestRecvDataFromRemote_Closing_OutOfOrder(t *testing.T) { func TestRecvDataFromRemote_Closing_OutOfOrder(t *testing.T) {
// Tests for when the closing frame of a stream is received first before any data frame // Tests for when the closing frame of a stream is received first before any data frame
testPayloadLen := 1024
testPayload := make([]byte, testPayloadLen) testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload) rand.Read(testPayload)
obfsBuf := make([]byte, obfsBufLen) obfsBuf := make([]byte, 17000)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
seshConfig := seshConfigs["ordered"] obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfig.Obfuscator, _ = MakeObfuscator(EncryptionMethodPlain, sessionKey) seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfig)
sesh := MakeSession(0, seshConfigOrdered)
// receive stream 1 closing first // receive stream 1 closing first
f1CloseStream := &Frame{ f1CloseStream := &Frame{
1, 1,
1, 1,
closingStream, C_STREAM,
testPayload, testPayload,
} }
n, _ := sesh.obfuscate(f1CloseStream, obfsBuf, 0) n, _ := sesh.Obfs(f1CloseStream, obfsBuf, 0)
err := sesh.recvDataFromRemote(obfsBuf[:n]) err := sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving out of order stream closing frame for stream 1: %v", err) t.Fatalf("receiving out of order stream closing frame for stream 1: %v", err)
} }
sesh.streamsM.Lock() _, ok := sesh.streams.Load(f1CloseStream.StreamID)
_, ok := sesh.streams[f1CloseStream.StreamID]
sesh.streamsM.Unlock()
if !ok { if !ok {
t.Fatal("stream 1 doesn't exist") t.Fatal("stream 1 doesn't exist")
} }
@ -382,10 +311,10 @@ func TestRecvDataFromRemote_Closing_OutOfOrder(t *testing.T) {
f1 := &Frame{ f1 := &Frame{
1, 1,
0, 0,
closingNothing, C_NOOP,
testPayload, testPayload,
} }
n, _ = sesh.obfuscate(f1, obfsBuf, 0) n, _ = sesh.Obfs(f1, obfsBuf, 0)
err = sesh.recvDataFromRemote(obfsBuf[:n]) err = sesh.recvDataFromRemote(obfsBuf[:n])
if err != nil { if err != nil {
t.Fatalf("receiving normal frame for stream 1: %v", err) t.Fatalf("receiving normal frame for stream 1: %v", err)
@ -406,235 +335,166 @@ func TestRecvDataFromRemote_Closing_OutOfOrder(t *testing.T) {
} }
} }
func TestParallelStreams(t *testing.T) { func TestParallel(t *testing.T) {
rand.Seed(0)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
obfuscator, _ := MakeObfuscator(EncryptionMethodPlain, sessionKey)
for seshType, seshConfig := range seshConfigs { obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfig := seshConfig seshConfigOrdered.Obfuscator = obfuscator
t.Run(seshType, func(t *testing.T) { sesh := MakeSession(0, seshConfigOrdered)
seshConfig.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfig)
numStreams := acceptBacklog numStreams := acceptBacklog
seqs := make([]*uint64, numStreams) seqs := make([]*uint64, numStreams)
for i := range seqs { for i := range seqs {
seqs[i] = new(uint64) seqs[i] = new(uint64)
} }
randFrame := func() *Frame { randFrame := func() *Frame {
id := rand.Intn(numStreams) id := rand.Intn(numStreams)
return &Frame{ return &Frame{
uint32(id), uint32(id),
atomic.AddUint64(seqs[id], 1) - 1, atomic.AddUint64(seqs[id], 1) - 1,
uint8(rand.Intn(2)), uint8(rand.Intn(2)),
[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 4},
} }
} }
const numOfTests = 5000 numOfTests := 5000
tests := make([]struct { tests := make([]struct {
name string name string
frame *Frame frame *Frame
}, numOfTests) }, numOfTests)
for i := range tests { for i := range tests {
tests[i].name = strconv.Itoa(i) tests[i].name = strconv.Itoa(i)
tests[i].frame = randFrame() tests[i].frame = randFrame()
} }
var wg sync.WaitGroup var wg sync.WaitGroup
for _, tc := range tests { for _, tc := range tests {
wg.Add(1) wg.Add(1)
go func(frame *Frame) { go func(frame *Frame) {
obfsBuf := make([]byte, obfsBufLen) data := make([]byte, 1000)
n, _ := sesh.obfuscate(frame, obfsBuf, 0) n, _ := sesh.Obfs(frame, data, 0)
obfsBuf = obfsBuf[0:n] data = data[0:n]
err := sesh.recvDataFromRemote(obfsBuf) err := sesh.recvDataFromRemote(data)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
}
wg.Done()
}(tc.frame)
} }
wg.Done()
}(tc.frame)
}
wg.Wait() wg.Wait()
sc := int(sesh.streamCount()) sc := int(sesh.streamCount())
var count int var count int
sesh.streamsM.Lock() sesh.streams.Range(func(_, s interface{}) bool {
for _, s := range sesh.streams { if s != nil {
if s != nil { count++
count++ }
} return true
} })
sesh.streamsM.Unlock() if sc != count {
if sc != count { t.Errorf("broken referential integrety: actual %v, reference count: %v", count, sc)
t.Errorf("broken referential integrety: actual %v, reference count: %v", count, sc)
}
})
} }
} }
func TestStream_SetReadDeadline(t *testing.T) { func TestStream_SetReadDeadline(t *testing.T) {
for seshType, seshConfig := range seshConfigs {
seshConfig := seshConfig
t.Run(seshType, func(t *testing.T) {
sesh := MakeSession(0, seshConfig)
sesh.AddConnection(connutil.Discard())
t.Run("read after deadline set", func(t *testing.T) {
stream, _ := sesh.OpenStream()
_ = stream.SetReadDeadline(time.Now().Add(-1 * time.Second))
_, err := stream.Read(make([]byte, 1))
if err != ErrTimeout {
t.Errorf("expecting error %v, got %v", ErrTimeout, err)
}
})
t.Run("unblock when deadline passed", func(t *testing.T) {
stream, _ := sesh.OpenStream()
done := make(chan struct{})
go func() {
_, _ = stream.Read(make([]byte, 1))
done <- struct{}{}
}()
_ = stream.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
select {
case <-done:
return
case <-time.After(500 * time.Millisecond):
t.Error("Read did not unblock after deadline has passed")
}
})
})
}
}
func TestSession_timeoutAfter(t *testing.T) {
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
obfuscator, _ := MakeObfuscator(EncryptionMethodPlain, sessionKey) obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
seshConfigOrdered.Obfuscator = obfuscator
for seshType, seshConfig := range seshConfigs { testReadDeadline := func(sesh *Session) {
seshConfig := seshConfig t.Run("read after deadline set", func(t *testing.T) {
t.Run(seshType, func(t *testing.T) { stream, _ := sesh.OpenStream()
seshConfig.Obfuscator = obfuscator _ = stream.SetReadDeadline(time.Now().Add(-1 * time.Second))
seshConfig.InactivityTimeout = 100 * time.Millisecond _, err := stream.Read(make([]byte, 1))
sesh := MakeSession(0, seshConfig) if err != ErrTimeout {
t.Errorf("expecting error %v, got %v", ErrTimeout, err)
}
})
assert.Eventually(t, func() bool { t.Run("unblock when deadline passed", func(t *testing.T) {
return sesh.IsClosed() stream, _ := sesh.OpenStream()
}, 5*seshConfig.InactivityTimeout, seshConfig.InactivityTimeout, "session should have timed out")
done := make(chan struct{})
go func() {
_, _ = stream.Read(make([]byte, 1))
done <- struct{}{}
}()
_ = stream.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
select {
case <-done:
return
case <-time.After(500 * time.Millisecond):
t.Error("Read did not unblock after deadline has passed")
}
}) })
} }
sesh := MakeSession(0, seshConfigOrdered)
sesh.AddConnection(connutil.Discard())
testReadDeadline(sesh)
sesh = MakeSession(0, seshConfigUnordered)
sesh.AddConnection(connutil.Discard())
testReadDeadline(sesh)
} }
func BenchmarkRecvDataFromRemote(b *testing.B) { func BenchmarkRecvDataFromRemote_Ordered(b *testing.B) {
testPayloadLen := 1024
testPayload := make([]byte, testPayloadLen) testPayload := make([]byte, testPayloadLen)
rand.Read(testPayload) rand.Read(testPayload)
f := Frame{ f := &Frame{
1, 1,
0, 0,
0, 0,
testPayload, testPayload,
} }
obfsBuf := make([]byte, 17000)
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
const maxIter = 500_000 // run with -benchtime 500000x to avoid index out of bounds panic b.Run("plain", func(b *testing.B) {
for name, ep := range encryptionMethods { obfuscator, _ := MakeObfuscator(E_METHOD_PLAIN, sessionKey)
ep := ep seshConfigOrdered.Obfuscator = obfuscator
b.Run(name, func(b *testing.B) { sesh := MakeSession(0, seshConfigOrdered)
for seshType, seshConfig := range seshConfigs { n, _ := sesh.Obfs(f, obfsBuf, 0)
b.Run(seshType, func(b *testing.B) {
f := f
seshConfig.Obfuscator, _ = MakeObfuscator(ep, sessionKey)
sesh := MakeSession(0, seshConfig)
go func() { b.SetBytes(int64(len(f.Payload)))
stream, _ := sesh.Accept() b.ResetTimer()
io.Copy(ioutil.Discard, stream) for i := 0; i < b.N; i++ {
}() sesh.recvDataFromRemote(obfsBuf[:n])
}
})
binaryFrames := [maxIter][]byte{} b.Run("aes-gcm", func(b *testing.B) {
for i := 0; i < maxIter; i++ { obfuscator, _ := MakeObfuscator(E_METHOD_AES_GCM, sessionKey)
obfsBuf := make([]byte, obfsBufLen) seshConfigOrdered.Obfuscator = obfuscator
n, _ := sesh.obfuscate(&f, obfsBuf, 0) sesh := MakeSession(0, seshConfigOrdered)
binaryFrames[i] = obfsBuf[:n] n, _ := sesh.Obfs(f, obfsBuf, 0)
f.Seq++
}
b.SetBytes(int64(len(f.Payload))) b.SetBytes(int64(len(f.Payload)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
sesh.recvDataFromRemote(binaryFrames[i]) sesh.recvDataFromRemote(obfsBuf[:n])
} }
}) })
}
}) b.Run("chacha20-poly1305", func(b *testing.B) {
} obfuscator, _ := MakeObfuscator(E_METHOD_CHACHA20_POLY1305, sessionKey)
} seshConfigOrdered.Obfuscator = obfuscator
sesh := MakeSession(0, seshConfigOrdered)
func BenchmarkMultiStreamWrite(b *testing.B) { n, _ := sesh.Obfs(f, obfsBuf, 0)
var sessionKey [32]byte
rand.Read(sessionKey[:]) b.SetBytes(int64(len(f.Payload)))
b.ResetTimer()
testPayload := make([]byte, testPayloadLen) for i := 0; i < b.N; i++ {
sesh.recvDataFromRemote(obfsBuf[:n])
for name, ep := range encryptionMethods { }
b.Run(name, func(b *testing.B) { })
for seshType, seshConfig := range seshConfigs {
b.Run(seshType, func(b *testing.B) {
seshConfig.Obfuscator, _ = MakeObfuscator(ep, sessionKey)
sesh := MakeSession(0, seshConfig)
sesh.AddConnection(connutil.Discard())
b.ResetTimer()
b.SetBytes(testPayloadLen)
b.RunParallel(func(pb *testing.PB) {
stream, _ := sesh.OpenStream()
for pb.Next() {
stream.Write(testPayload)
}
})
})
}
})
}
}
func BenchmarkLatency(b *testing.B) {
var sessionKey [32]byte
rand.Read(sessionKey[:])
for name, ep := range encryptionMethods {
b.Run(name, func(b *testing.B) {
for seshType, seshConfig := range seshConfigs {
b.Run(seshType, func(b *testing.B) {
seshConfig.Obfuscator, _ = MakeObfuscator(ep, sessionKey)
clientSesh := MakeSession(0, seshConfig)
serverSesh := MakeSession(0, seshConfig)
c, s := net.Pipe()
clientSesh.AddConnection(c)
serverSesh.AddConnection(s)
buf := make([]byte, 64)
clientStream, _ := clientSesh.OpenStream()
clientStream.Write(buf)
serverStream, _ := serverSesh.Accept()
io.ReadFull(serverStream, buf)
b.ResetTimer()
for i := 0; i < b.N; i++ {
clientStream.Write(buf)
io.ReadFull(serverStream, buf)
}
})
}
})
}
} }

View File

@ -6,60 +6,52 @@ import (
"net" "net"
"time" "time"
log "github.com/sirupsen/logrus"
"sync" "sync"
"sync/atomic" "sync/atomic"
log "github.com/sirupsen/logrus"
) )
var ErrBrokenStream = errors.New("broken stream") var ErrBrokenStream = errors.New("broken stream")
// Stream implements net.Conn. It represents an optionally-ordered, full-duplex, self-contained connection.
// If the session it belongs to runs in ordered mode, it provides ordering guarantee regardless of the underlying
// connection used.
// If the underlying connections the session uses are reliable, Stream is reliable. If they are not, Stream does not
// guarantee reliability.
type Stream struct { type Stream struct {
id uint32 id uint32
session *Session session *Session
// a buffer (implemented as an asynchronous buffered pipe) to put data we've received from recvFrame but hasn't
// been read by the consumer through Read or WriteTo.
recvBuf recvBuffer recvBuf recvBuffer
writingM sync.Mutex nextSendSeq uint64
writingFrame Frame // we do the allocation here to save repeated allocations in Write and ReadFrom
writingM sync.Mutex
// atomic // atomic
closed uint32 closed uint32
// When we want order guarantee (i.e. session.Unordered is false), // only alloc when writing to the stream
// we assign each stream a fixed underlying connection. allocIdempot sync.Once
// If the underlying connections the session uses provide ordering guarantee (most likely TCP), obfsBuf []byte
// recvBuffer (implemented by streamBuffer under ordered mode) will not receive out-of-order packets
// so it won't have to use its priority queue to sort it.
// This is not used in unordered connection mode
assignedConn net.Conn
readFromTimeout time.Duration // we assign each stream a fixed underlying TCP connection to utilise order guarantee provided by TCP itself
// so that frameSorter should have few to none ooo frames to deal with
// overall the streams in a session should be uniformly distributed across all connections
// This is not used in unordered connection mode
assignedConnId uint32
rfTimeout time.Duration
} }
func makeStream(sesh *Session, id uint32) *Stream { func makeStream(sesh *Session, id uint32) *Stream {
var recvBuf recvBuffer
if sesh.Unordered {
recvBuf = NewDatagramBuffer()
} else {
recvBuf = NewStreamBuffer()
}
stream := &Stream{ stream := &Stream{
id: id, id: id,
session: sesh, session: sesh,
writingFrame: Frame{ recvBuf: recvBuf,
StreamID: id,
Seq: 0,
Closing: closingNothing,
},
}
if sesh.Unordered {
stream.recvBuf = NewDatagramBufferedPipe()
} else {
stream.recvBuf = NewStreamBuffer()
} }
return stream return stream
@ -67,8 +59,7 @@ func makeStream(sesh *Session, id uint32) *Stream {
func (s *Stream) isClosed() bool { return atomic.LoadUint32(&s.closed) == 1 } func (s *Stream) isClosed() bool { return atomic.LoadUint32(&s.closed) == 1 }
// receive a readily deobfuscated Frame so its payload can later be Read func (s *Stream) writeFrame(frame Frame) error {
func (s *Stream) recvFrame(frame *Frame) error {
toBeClosed, err := s.recvBuf.Write(frame) toBeClosed, err := s.recvBuf.Write(frame)
if toBeClosed { if toBeClosed {
err = s.passiveClose() err = s.passiveClose()
@ -96,14 +87,25 @@ func (s *Stream) Read(buf []byte) (n int, err error) {
return return
} }
func (s *Stream) obfuscateAndSend(buf []byte, payloadOffsetInBuf int) error { func (s *Stream) WriteTo(w io.Writer) (int64, error) {
cipherTextLen, err := s.session.obfuscate(&s.writingFrame, buf, payloadOffsetInBuf) // will keep writing until the underlying buffer is closed
s.writingFrame.Seq++ n, err := s.recvBuf.WriteTo(w)
log.Tracef("%v read from stream %v with err %v", n, s.id, err)
if err == io.EOF {
return n, ErrBrokenStream
}
return n, nil
}
func (s *Stream) sendFrame(f *Frame, framePayloadOffset int) error {
var cipherTextLen int
cipherTextLen, err := s.session.Obfs(f, s.obfsBuf, framePayloadOffset)
if err != nil { if err != nil {
return err return err
} }
_, err = s.session.sb.send(buf[:cipherTextLen], &s.assignedConn) _, err = s.session.sb.send(s.obfsBuf[:cipherTextLen], &s.assignedConnId)
log.Tracef("%v sent to remote through stream %v with err %v. seq: %v", len(f.Payload), s.id, err, f.Seq)
if err != nil { if err != nil {
if err == errBrokenSwitchboard { if err == errBrokenSwitchboard {
s.session.SetTerminalMsg(err.Error()) s.session.SetTerminalMsg(err.Error())
@ -122,24 +124,28 @@ func (s *Stream) Write(in []byte) (n int, err error) {
return 0, ErrBrokenStream return 0, ErrBrokenStream
} }
if s.obfsBuf == nil {
s.obfsBuf = make([]byte, s.session.SendBufferSize)
}
for n < len(in) { for n < len(in) {
var framePayload []byte var framePayload []byte
if len(in)-n <= s.session.maxStreamUnitWrite { if len(in)-n <= s.session.maxStreamUnitWrite {
// if we can fit remaining data of in into one frame
framePayload = in[n:] framePayload = in[n:]
} else { } else {
// if we have to split if s.session.Unordered { // no splitting
if s.session.Unordered {
// but we are not allowed to
err = io.ErrShortBuffer err = io.ErrShortBuffer
return return
} }
framePayload = in[n : s.session.maxStreamUnitWrite+n] framePayload = in[n : s.session.maxStreamUnitWrite+n]
} }
s.writingFrame.Payload = framePayload f := &Frame{
buf := s.session.streamObfsBufPool.Get().(*[]byte) StreamID: s.id,
err = s.obfuscateAndSend(*buf, 0) Seq: s.nextSendSeq,
s.session.streamObfsBufPool.Put(buf) Closing: C_NOOP,
Payload: framePayload,
}
s.nextSendSeq++
err = s.sendFrame(f, 0)
if err != nil { if err != nil {
return return
} }
@ -148,34 +154,36 @@ func (s *Stream) Write(in []byte) (n int, err error) {
return return
} }
// ReadFrom continuously read data from r and send it off, until either r returns error or nothing has been read
// for readFromTimeout amount of time
func (s *Stream) ReadFrom(r io.Reader) (n int64, err error) { func (s *Stream) ReadFrom(r io.Reader) (n int64, err error) {
if s.obfsBuf == nil {
s.obfsBuf = make([]byte, s.session.SendBufferSize)
}
for { for {
if s.readFromTimeout != 0 { if s.rfTimeout != 0 {
if rder, ok := r.(net.Conn); !ok { if rder, ok := r.(net.Conn); !ok {
log.Warn("ReadFrom timeout is set but reader doesn't implement SetReadDeadline") log.Warn("ReadFrom timeout is set but reader doesn't implement SetReadDeadline")
} else { } else {
rder.SetReadDeadline(time.Now().Add(s.readFromTimeout)) rder.SetReadDeadline(time.Now().Add(s.rfTimeout))
} }
} }
buf := s.session.streamObfsBufPool.Get().(*[]byte) read, er := r.Read(s.obfsBuf[HEADER_LEN : HEADER_LEN+s.session.maxStreamUnitWrite])
read, er := r.Read((*buf)[frameHeaderLength : frameHeaderLength+s.session.maxStreamUnitWrite])
if er != nil { if er != nil {
return n, er return n, er
} }
// the above read may have been unblocked by another goroutine calling stream.Close(), so we need
// to check that here
if s.isClosed() { if s.isClosed() {
return n, ErrBrokenStream return n, ErrBrokenStream
} }
s.writingM.Lock() s.writingM.Lock()
s.writingFrame.Payload = (*buf)[frameHeaderLength : frameHeaderLength+read] f := &Frame{
err = s.obfuscateAndSend(*buf, frameHeaderLength) StreamID: s.id,
Seq: s.nextSendSeq,
Closing: C_NOOP,
Payload: s.obfsBuf[HEADER_LEN : HEADER_LEN+read],
}
s.nextSendSeq++
err = s.sendFrame(f, HEADER_LEN)
s.writingM.Unlock() s.writingM.Unlock()
s.session.streamObfsBufPool.Put(buf)
if err != nil { if err != nil {
return return
@ -196,16 +204,16 @@ func (s *Stream) Close() error {
return s.session.closeStream(s, true) return s.session.closeStream(s, true)
} }
// the following functions are purely for implementing net.Conn interface.
// they are not used
var errNotImplemented = errors.New("Not implemented")
func (s *Stream) LocalAddr() net.Addr { return s.session.addrs.Load().([]net.Addr)[0] } func (s *Stream) LocalAddr() net.Addr { return s.session.addrs.Load().([]net.Addr)[0] }
func (s *Stream) RemoteAddr() net.Addr { return s.session.addrs.Load().([]net.Addr)[1] } func (s *Stream) RemoteAddr() net.Addr { return s.session.addrs.Load().([]net.Addr)[1] }
func (s *Stream) SetReadDeadline(t time.Time) error { s.recvBuf.SetReadDeadline(t); return nil }
func (s *Stream) SetReadFromTimeout(d time.Duration) { s.readFromTimeout = d }
var errNotImplemented = errors.New("Not implemented")
// the following functions are purely for implementing net.Conn interface.
// they are not used
// TODO: implement the following // TODO: implement the following
func (s *Stream) SetDeadline(t time.Time) error { return errNotImplemented } func (s *Stream) SetDeadline(t time.Time) error { return errNotImplemented }
func (s *Stream) SetWriteToTimeout(d time.Duration) { s.recvBuf.SetWriteToTimeout(d) }
func (s *Stream) SetReadDeadline(t time.Time) error { s.recvBuf.SetReadDeadline(t); return nil }
func (s *Stream) SetReadFromTimeout(d time.Duration) { s.rfTimeout = d }
func (s *Stream) SetWriteDeadline(t time.Time) error { return errNotImplemented } func (s *Stream) SetWriteDeadline(t time.Time) error { return errNotImplemented }

View File

@ -13,6 +13,7 @@ package multiplex
import ( import (
"container/heap" "container/heap"
"fmt" "fmt"
"io"
"sync" "sync"
"time" "time"
) )
@ -47,27 +48,23 @@ type streamBuffer struct {
nextRecvSeq uint64 nextRecvSeq uint64
sh sorterHeap sh sorterHeap
buf *streamBufferedPipe buf *bufferedPipe
} }
// streamBuffer is a wrapper around streamBufferedPipe.
// Its main function is to sort frames in order, and wait for frames to arrive
// if they have arrived out-of-order. Then it writes the payload of frames into
// a streamBufferedPipe.
func NewStreamBuffer() *streamBuffer { func NewStreamBuffer() *streamBuffer {
sb := &streamBuffer{ sb := &streamBuffer{
sh: []*Frame{}, sh: []*Frame{},
buf: NewStreamBufferedPipe(), buf: NewBufferedPipe(),
} }
return sb return sb
} }
func (sb *streamBuffer) Write(f *Frame) (toBeClosed bool, err error) { func (sb *streamBuffer) Write(f Frame) (toBeClosed bool, err error) {
sb.recvM.Lock() sb.recvM.Lock()
defer sb.recvM.Unlock() defer sb.recvM.Unlock()
// when there'fs no ooo packages in heap and we receive the next package in order // when there'fs no ooo packages in heap and we receive the next package in order
if len(sb.sh) == 0 && f.Seq == sb.nextRecvSeq { if len(sb.sh) == 0 && f.Seq == sb.nextRecvSeq {
if f.Closing != closingNothing { if f.Closing != C_NOOP {
return true, nil return true, nil
} else { } else {
sb.buf.Write(f.Payload) sb.buf.Write(f.Payload)
@ -80,14 +77,11 @@ func (sb *streamBuffer) Write(f *Frame) (toBeClosed bool, err error) {
return false, fmt.Errorf("seq %v is smaller than nextRecvSeq %v", f.Seq, sb.nextRecvSeq) return false, fmt.Errorf("seq %v is smaller than nextRecvSeq %v", f.Seq, sb.nextRecvSeq)
} }
saved := *f heap.Push(&sb.sh, &f)
saved.Payload = make([]byte, len(f.Payload))
copy(saved.Payload, f.Payload)
heap.Push(&sb.sh, &saved)
// Keep popping from the heap until empty or to the point that the wanted seq was not received // Keep popping from the heap until empty or to the point that the wanted seq was not received
for len(sb.sh) > 0 && sb.sh[0].Seq == sb.nextRecvSeq { for len(sb.sh) > 0 && sb.sh[0].Seq == sb.nextRecvSeq {
f = heap.Pop(&sb.sh).(*Frame) f = *heap.Pop(&sb.sh).(*Frame)
if f.Closing != closingNothing { if f.Closing != C_NOOP {
return true, nil return true, nil
} else { } else {
sb.buf.Write(f.Payload) sb.buf.Write(f.Payload)
@ -101,6 +95,10 @@ func (sb *streamBuffer) Read(buf []byte) (int, error) {
return sb.buf.Read(buf) return sb.buf.Read(buf)
} }
func (sb *streamBuffer) WriteTo(w io.Writer) (int64, error) {
return sb.buf.WriteTo(w)
}
func (sb *streamBuffer) Close() error { func (sb *streamBuffer) Close() error {
sb.recvM.Lock() sb.recvM.Lock()
defer sb.recvM.Unlock() defer sb.recvM.Unlock()
@ -108,4 +106,5 @@ func (sb *streamBuffer) Close() error {
return sb.buf.Close() return sb.buf.Close()
} }
func (sb *streamBuffer) SetReadDeadline(t time.Time) { sb.buf.SetReadDeadline(t) } func (sb *streamBuffer) SetReadDeadline(t time.Time) { sb.buf.SetReadDeadline(t) }
func (sb *streamBuffer) SetWriteToTimeout(d time.Duration) { sb.buf.SetWriteToTimeout(d) }

View File

@ -3,7 +3,6 @@ package multiplex
import ( import (
"encoding/binary" "encoding/binary"
"io" "io"
//"log" //"log"
"sort" "sort"
"testing" "testing"
@ -21,10 +20,11 @@ func TestRecvNewFrame(t *testing.T) {
for _, n := range set { for _, n := range set {
bu64 := make([]byte, 8) bu64 := make([]byte, 8)
binary.BigEndian.PutUint64(bu64, n) binary.BigEndian.PutUint64(bu64, n)
sb.Write(&Frame{ frame := Frame{
Seq: n, Seq: n,
Payload: bu64, Payload: bu64,
}) }
sb.Write(frame)
} }
var sortedResult []uint64 var sortedResult []uint64
@ -80,7 +80,7 @@ func TestStreamBuffer_RecvThenClose(t *testing.T) {
Closing: 0, Closing: 0,
Payload: testData, Payload: testData,
} }
sb.Write(&testFrame) sb.Write(testFrame)
sb.Close() sb.Close()
readBuf := make([]byte, testDataLen) readBuf := make([]byte, testDataLen)

View File

@ -1,102 +0,0 @@
// This is base on https://github.com/golang/go/blob/0436b162397018c45068b47ca1b5924a3eafdee0/src/net/net_fake.go#L173
package multiplex
import (
"bytes"
"io"
"sync"
"time"
)
// The point of a streamBufferedPipe is that Read() will block until data is available
type streamBufferedPipe struct {
buf *bytes.Buffer
closed bool
rwCond *sync.Cond
rDeadline time.Time
wtTimeout time.Duration
timeoutTimer *time.Timer
}
func NewStreamBufferedPipe() *streamBufferedPipe {
p := &streamBufferedPipe{
rwCond: sync.NewCond(&sync.Mutex{}),
buf: new(bytes.Buffer),
}
return p
}
func (p *streamBufferedPipe) Read(target []byte) (int, error) {
p.rwCond.L.Lock()
defer p.rwCond.L.Unlock()
for {
if p.closed && p.buf.Len() == 0 {
return 0, io.EOF
}
hasRDeadline := !p.rDeadline.IsZero()
if hasRDeadline {
if time.Until(p.rDeadline) <= 0 {
return 0, ErrTimeout
}
}
if p.buf.Len() > 0 {
break
}
if hasRDeadline {
p.broadcastAfter(time.Until(p.rDeadline))
}
p.rwCond.Wait()
}
n, err := p.buf.Read(target)
// err will always be nil because we have already verified that buf.Len() != 0
p.rwCond.Broadcast()
return n, err
}
func (p *streamBufferedPipe) Write(input []byte) (int, error) {
p.rwCond.L.Lock()
defer p.rwCond.L.Unlock()
for {
if p.closed {
return 0, io.ErrClosedPipe
}
if p.buf.Len() <= recvBufferSizeLimit {
// if p.buf gets too large, write() will panic. We don't want this to happen
break
}
p.rwCond.Wait()
}
n, err := p.buf.Write(input)
// err will always be nil
p.rwCond.Broadcast()
return n, err
}
func (p *streamBufferedPipe) Close() error {
p.rwCond.L.Lock()
defer p.rwCond.L.Unlock()
p.closed = true
p.rwCond.Broadcast()
return nil
}
func (p *streamBufferedPipe) SetReadDeadline(t time.Time) {
p.rwCond.L.Lock()
defer p.rwCond.L.Unlock()
p.rDeadline = t
p.rwCond.Broadcast()
}
func (p *streamBufferedPipe) broadcastAfter(d time.Duration) {
if p.timeoutTimer != nil {
p.timeoutTimer.Stop()
}
p.timeoutTimer = time.AfterFunc(d, p.rwCond.Broadcast)
}

View File

@ -1,93 +0,0 @@
package multiplex
import (
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const readBlockTime = 500 * time.Millisecond
func TestPipeRW(t *testing.T) {
pipe := NewStreamBufferedPipe()
b := []byte{0x01, 0x02, 0x03}
n, err := pipe.Write(b)
assert.NoError(t, err, "simple write")
assert.Equal(t, len(b), n, "number of bytes written")
b2 := make([]byte, len(b))
n, err = pipe.Read(b2)
assert.NoError(t, err, "simple read")
assert.Equal(t, len(b), n, "number of bytes read")
assert.Equal(t, b, b2)
}
func TestReadBlock(t *testing.T) {
pipe := NewStreamBufferedPipe()
b := []byte{0x01, 0x02, 0x03}
go func() {
time.Sleep(readBlockTime)
pipe.Write(b)
}()
b2 := make([]byte, len(b))
n, err := pipe.Read(b2)
assert.NoError(t, err, "blocked read")
assert.Equal(t, len(b), n, "number of bytes read after block")
assert.Equal(t, b, b2)
}
func TestPartialRead(t *testing.T) {
pipe := NewStreamBufferedPipe()
b := []byte{0x01, 0x02, 0x03}
pipe.Write(b)
b1 := make([]byte, 1)
n, err := pipe.Read(b1)
assert.NoError(t, err, "partial read of 1")
assert.Equal(t, len(b1), n, "number of bytes in partial read of 1")
assert.Equal(t, b[0], b1[0])
b2 := make([]byte, 2)
n, err = pipe.Read(b2)
assert.NoError(t, err, "partial read of 2")
assert.Equal(t, len(b2), n, "number of bytes in partial read of 2")
assert.Equal(t, b[1:], b2)
}
func TestReadAfterClose(t *testing.T) {
pipe := NewStreamBufferedPipe()
b := []byte{0x01, 0x02, 0x03}
pipe.Write(b)
b2 := make([]byte, len(b))
pipe.Close()
n, err := pipe.Read(b2)
assert.NoError(t, err, "simple read")
assert.Equal(t, len(b), n, "number of bytes read")
assert.Equal(t, b, b2)
}
func BenchmarkBufferedPipe_RW(b *testing.B) {
const PAYLOAD_LEN = 1000
testData := make([]byte, PAYLOAD_LEN)
rand.Read(testData)
pipe := NewStreamBufferedPipe()
smallBuf := make([]byte, PAYLOAD_LEN-10)
go func() {
for {
pipe.Read(smallBuf)
}
}()
b.SetBytes(int64(len(testData)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
pipe.Write(testData)
}
}

View File

@ -2,14 +2,13 @@ package multiplex
import ( import (
"bytes" "bytes"
"github.com/cbeuw/Cloak/internal/common"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"testing" "testing"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/stretchr/testify/assert"
"github.com/cbeuw/connutil" "github.com/cbeuw/connutil"
) )
@ -37,10 +36,9 @@ func BenchmarkStream_Write_Ordered(b *testing.B) {
testData := make([]byte, testDataLen) testData := make([]byte, testDataLen)
rand.Read(testData) rand.Read(testData)
eMethods := map[string]byte{ eMethods := map[string]byte{
"plain": EncryptionMethodPlain, "plain": E_METHOD_PLAIN,
"chacha20-poly1305": EncryptionMethodChaha20Poly1305, "chacha20-poly1305": E_METHOD_CHACHA20_POLY1305,
"aes-256-gcm": EncryptionMethodAES256GCM, "aes-gcm": E_METHOD_AES_GCM,
"aes-128-gcm": EncryptionMethodAES128GCM,
} }
for name, method := range eMethods { for name, method := range eMethods {
@ -57,11 +55,62 @@ func BenchmarkStream_Write_Ordered(b *testing.B) {
} }
} }
/*
func BenchmarkStream_Read_Ordered(b *testing.B) {
var sessionKey [32]byte
rand.Read(sessionKey[:])
sesh := setupSesh(false, sessionKey)
testPayload := make([]byte, payloadLen)
rand.Read(testPayload)
f := &Frame{
1,
0,
0,
testPayload,
}
obfsBuf := make([]byte, 17000)
l, _ := net.Listen("tcp", "127.0.0.1:0")
go func() {
// potentially bottlenecked here rather than the actual stream read throughput
conn, _ := net.Dial("tcp", l.Addr().String())
for {
i, _ := sesh.Obfs(f, obfsBuf)
f.Seq += 1
_, err := conn.Write(obfsBuf[:i])
if err != nil {
b.Error("cannot write to connection", err)
}
}
}()
conn, _ := l.Accept()
sesh.AddConnection(conn)
stream, err := sesh.Accept()
if err != nil {
b.Error("failed to accept stream", err)
}
//time.Sleep(5*time.Second) // wait for buffer to fill up
readBuf := make([]byte, payloadLen)
b.SetBytes(payloadLen)
b.ResetTimer()
for j := 0; j < b.N; j++ {
stream.Read(readBuf)
}
}
*/
func TestStream_Write(t *testing.T) { func TestStream_Write(t *testing.T) {
hole := connutil.Discard() hole := connutil.Discard()
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
sesh := setupSesh(false, sessionKey, EncryptionMethodPlain) sesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
sesh.AddConnection(hole) sesh.AddConnection(hole)
testData := make([]byte, payloadLen) testData := make([]byte, payloadLen)
rand.Read(testData) rand.Read(testData)
@ -80,11 +129,11 @@ func TestStream_WriteSync(t *testing.T) {
// Close calls made after write MUST have a higher seq // Close calls made after write MUST have a higher seq
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
clientSesh := setupSesh(false, sessionKey, EncryptionMethodPlain) clientSesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
serverSesh := setupSesh(false, sessionKey, EncryptionMethodPlain) serverSesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
w, r := connutil.AsyncPipe() w, r := connutil.AsyncPipe()
clientSesh.AddConnection(common.NewTLSConn(w)) clientSesh.AddConnection(&common.TLSConn{Conn: w})
serverSesh.AddConnection(common.NewTLSConn(r)) serverSesh.AddConnection(&common.TLSConn{Conn: r})
testData := make([]byte, payloadLen) testData := make([]byte, payloadLen)
rand.Read(testData) rand.Read(testData)
@ -126,126 +175,46 @@ func TestStream_WriteSync(t *testing.T) {
func TestStream_Close(t *testing.T) { func TestStream_Close(t *testing.T) {
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
sesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
testPayload := []byte{42, 42, 42} testPayload := []byte{42, 42, 42}
dataFrame := &Frame{ f := &Frame{
1, 1,
0, 0,
0, 0,
testPayload, testPayload,
} }
t.Run("active closing", func(t *testing.T) { conn, writingEnd := connutil.AsyncPipe()
sesh := setupSesh(false, sessionKey, EncryptionMethodPlain) sesh.AddConnection(conn)
rawConn, rawWritingEnd := connutil.AsyncPipe() obfsBuf := make([]byte, 512)
sesh.AddConnection(common.NewTLSConn(rawConn)) i, _ := sesh.Obfs(f, obfsBuf, 0)
writingEnd := common.NewTLSConn(rawWritingEnd) writingEnd.Write(obfsBuf[:i])
time.Sleep(100 * time.Microsecond)
stream, err := sesh.Accept()
if err != nil {
t.Error("failed to accept stream", err)
return
}
err = stream.Close()
if err != nil {
t.Error("failed to actively close stream", err)
return
}
obfsBuf := make([]byte, 512) if sI, _ := sesh.streams.Load(stream.(*Stream).id); sI != nil {
i, _ := sesh.obfuscate(dataFrame, obfsBuf, 0) t.Error("stream still exists")
_, err := writingEnd.Write(obfsBuf[:i]) return
if err != nil { }
t.Error("failed to write from remote end")
}
stream, err := sesh.Accept()
if err != nil {
t.Error("failed to accept stream", err)
return
}
time.Sleep(500 * time.Millisecond)
err = stream.Close()
if err != nil {
t.Error("failed to actively close stream", err)
return
}
sesh.streamsM.Lock() readBuf := make([]byte, len(testPayload))
if s, _ := sesh.streams[stream.(*Stream).id]; s != nil { _, err = io.ReadFull(stream, readBuf)
sesh.streamsM.Unlock() if err != nil {
t.Error("stream still exists") t.Errorf("can't read residual data %v", err)
return }
} if !bytes.Equal(readBuf, testPayload) {
sesh.streamsM.Unlock() t.Errorf("read wrong data")
}
readBuf := make([]byte, len(testPayload))
_, err = io.ReadFull(stream, readBuf)
if err != nil {
t.Errorf("cannot read resiual data: %v", err)
}
if !bytes.Equal(readBuf, testPayload) {
t.Errorf("read wrong data")
}
})
t.Run("passive closing", func(t *testing.T) {
sesh := setupSesh(false, sessionKey, EncryptionMethodPlain)
rawConn, rawWritingEnd := connutil.AsyncPipe()
sesh.AddConnection(common.NewTLSConn(rawConn))
writingEnd := common.NewTLSConn(rawWritingEnd)
obfsBuf := make([]byte, 512)
i, err := sesh.obfuscate(dataFrame, obfsBuf, 0)
if err != nil {
t.Errorf("failed to obfuscate frame %v", err)
}
_, err = writingEnd.Write(obfsBuf[:i])
if err != nil {
t.Error("failed to write from remote end")
}
stream, err := sesh.Accept()
if err != nil {
t.Error("failed to accept stream", err)
return
}
closingFrame := &Frame{
1,
dataFrame.Seq + 1,
closingStream,
testPayload,
}
i, err = sesh.obfuscate(closingFrame, obfsBuf, 0)
if err != nil {
t.Errorf("failed to obfuscate frame %v", err)
}
_, err = writingEnd.Write(obfsBuf[:i])
if err != nil {
t.Errorf("failed to write from remote end %v", err)
}
closingFrameDup := &Frame{
1,
dataFrame.Seq + 2,
closingStream,
testPayload,
}
i, err = sesh.obfuscate(closingFrameDup, obfsBuf, 0)
if err != nil {
t.Errorf("failed to obfuscate frame %v", err)
}
_, err = writingEnd.Write(obfsBuf[:i])
if err != nil {
t.Errorf("failed to write from remote end %v", err)
}
readBuf := make([]byte, len(testPayload))
_, err = io.ReadFull(stream, readBuf)
if err != nil {
t.Errorf("can't read residual data %v", err)
}
assert.Eventually(t, func() bool {
sesh.streamsM.Lock()
s, _ := sesh.streams[stream.(*Stream).id]
sesh.streamsM.Unlock()
return s == nil
}, time.Second, 10*time.Millisecond, "streams still exists")
})
} }
func TestStream_Read(t *testing.T) { func TestStream_Read(t *testing.T) {
@ -264,20 +233,21 @@ func TestStream_Read(t *testing.T) {
} }
var streamID uint32 var streamID uint32
buf := make([]byte, 10)
obfsBuf := make([]byte, 512)
for name, unordered := range seshes { for name, unordered := range seshes {
sesh := setupSesh(unordered, emptyKey, EncryptionMethodPlain) sesh := setupSesh(unordered, emptyKey, E_METHOD_PLAIN)
rawConn, rawWritingEnd := connutil.AsyncPipe() conn, writingEnd := connutil.AsyncPipe()
sesh.AddConnection(common.NewTLSConn(rawConn)) sesh.AddConnection(conn)
writingEnd := common.NewTLSConn(rawWritingEnd)
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
buf := make([]byte, 10)
obfsBuf := make([]byte, 512)
t.Run("Plain read", func(t *testing.T) { t.Run("Plain read", func(t *testing.T) {
f.StreamID = streamID f.StreamID = streamID
i, _ := sesh.obfuscate(f, obfsBuf, 0) i, _ := sesh.Obfs(f, obfsBuf, 0)
streamID++ streamID++
writingEnd.Write(obfsBuf[:i]) writingEnd.Write(obfsBuf[:i])
time.Sleep(100 * time.Microsecond)
stream, err := sesh.Accept() stream, err := sesh.Accept()
if err != nil { if err != nil {
t.Error("failed to accept stream", err) t.Error("failed to accept stream", err)
@ -300,9 +270,10 @@ func TestStream_Read(t *testing.T) {
}) })
t.Run("Nil buf", func(t *testing.T) { t.Run("Nil buf", func(t *testing.T) {
f.StreamID = streamID f.StreamID = streamID
i, _ := sesh.obfuscate(f, obfsBuf, 0) i, _ := sesh.Obfs(f, obfsBuf, 0)
streamID++ streamID++
writingEnd.Write(obfsBuf[:i]) writingEnd.Write(obfsBuf[:i])
time.Sleep(100 * time.Microsecond)
stream, _ := sesh.Accept() stream, _ := sesh.Accept()
i, err := stream.Read(nil) i, err := stream.Read(nil)
if i != 0 || err != nil { if i != 0 || err != nil {
@ -312,22 +283,22 @@ func TestStream_Read(t *testing.T) {
}) })
t.Run("Read after stream close", func(t *testing.T) { t.Run("Read after stream close", func(t *testing.T) {
f.StreamID = streamID f.StreamID = streamID
i, _ := sesh.obfuscate(f, obfsBuf, 0) i, _ := sesh.Obfs(f, obfsBuf, 0)
streamID++ streamID++
writingEnd.Write(obfsBuf[:i]) writingEnd.Write(obfsBuf[:i])
time.Sleep(100 * time.Microsecond)
stream, _ := sesh.Accept() stream, _ := sesh.Accept()
time.Sleep(500 * time.Millisecond)
stream.Close() stream.Close()
i, err := stream.Read(buf)
_, err := io.ReadFull(stream, buf[:smallPayloadLen])
if err != nil { if err != nil {
t.Errorf("cannot read residual data: %v", err) t.Error("failed to read", err)
} }
if !bytes.Equal(buf[:smallPayloadLen], testPayload) { if i != smallPayloadLen {
t.Errorf("expected read %v, got %v", smallPayloadLen, i)
}
if !bytes.Equal(buf[:i], testPayload) {
t.Error("expected", testPayload, t.Error("expected", testPayload,
"got", buf[:smallPayloadLen]) "got", buf[:i])
} }
_, err = stream.Read(buf) _, err = stream.Read(buf)
if err == nil { if err == nil {
@ -337,21 +308,22 @@ func TestStream_Read(t *testing.T) {
}) })
t.Run("Read after session close", func(t *testing.T) { t.Run("Read after session close", func(t *testing.T) {
f.StreamID = streamID f.StreamID = streamID
i, _ := sesh.obfuscate(f, obfsBuf, 0) i, _ := sesh.Obfs(f, obfsBuf, 0)
streamID++ streamID++
writingEnd.Write(obfsBuf[:i]) writingEnd.Write(obfsBuf[:i])
time.Sleep(100 * time.Microsecond)
stream, _ := sesh.Accept() stream, _ := sesh.Accept()
time.Sleep(500 * time.Millisecond)
sesh.Close() sesh.Close()
_, err := io.ReadFull(stream, buf[:smallPayloadLen]) i, err := stream.Read(buf)
if err != nil { if err != nil {
t.Errorf("cannot read resiual data: %v", err) t.Error("failed to read", err)
} }
if !bytes.Equal(buf[:smallPayloadLen], testPayload) { if i != smallPayloadLen {
t.Errorf("expected read %v, got %v", smallPayloadLen, i)
}
if !bytes.Equal(buf[:i], testPayload) {
t.Error("expected", testPayload, t.Error("expected", testPayload,
"got", buf[:smallPayloadLen]) "got", buf[:i])
} }
_, err = stream.Read(buf) _, err = stream.Read(buf)
if err == nil { if err == nil {
@ -363,10 +335,35 @@ func TestStream_Read(t *testing.T) {
} }
} }
func TestStream_SetWriteToTimeout(t *testing.T) {
seshes := map[string]*Session{
"ordered": setupSesh(false, emptyKey, E_METHOD_PLAIN),
"unordered": setupSesh(true, emptyKey, E_METHOD_PLAIN),
}
for name, sesh := range seshes {
t.Run(name, func(t *testing.T) {
stream, _ := sesh.OpenStream()
stream.SetWriteToTimeout(100 * time.Millisecond)
done := make(chan struct{})
go func() {
stream.WriteTo(ioutil.Discard)
done <- struct{}{}
}()
select {
case <-done:
return
case <-time.After(500 * time.Millisecond):
t.Error("didn't timeout")
}
})
}
}
func TestStream_SetReadFromTimeout(t *testing.T) { func TestStream_SetReadFromTimeout(t *testing.T) {
seshes := map[string]*Session{ seshes := map[string]*Session{
"ordered": setupSesh(false, emptyKey, EncryptionMethodPlain), "ordered": setupSesh(false, emptyKey, E_METHOD_PLAIN),
"unordered": setupSesh(true, emptyKey, EncryptionMethodPlain), "unordered": setupSesh(true, emptyKey, E_METHOD_PLAIN),
} }
for name, sesh := range seshes { for name, sesh := range seshes {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {

View File

@ -2,159 +2,165 @@ package multiplex
import ( import (
"errors" "errors"
"github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"math/rand/v2" "math/rand"
"net" "net"
"sync" "sync"
"sync/atomic" "sync/atomic"
) )
type switchboardStrategy int
const ( const (
fixedConnMapping switchboardStrategy = iota FIXED_CONN_MAPPING switchboardStrategy = iota
uniformSpread UNIFORM_SPREAD
) )
// switchboard represents the connection pool. It is responsible for managing type switchboardConfig struct {
// transport-layer connections between client and server. valve Valve
// It has several purposes: constantly receiving incoming data from all connections strategy switchboardStrategy
// and pass them to Session.recvDataFromRemote(); accepting data through recvBufferSize int
// switchboard.send(), in which it selects a connection according to its }
// switchboardStrategy and send the data off using that; and counting, as well as
// rate limiting, data received and sent through its Valve. // switchboard is responsible for keeping the reference of TCP connections between client and server
type switchboard struct { type switchboard struct {
session *Session session *Session
valve Valve switchboardConfig
strategy switchboardStrategy
conns sync.Map conns sync.Map
connsCount uint32 numConns uint32
randPool sync.Pool nextConnId uint32
broken uint32 broken uint32
} }
func makeSwitchboard(sesh *Session) *switchboard { func makeSwitchboard(sesh *Session, config switchboardConfig) *switchboard {
// rates are uint64 because in the usermanager we want the bandwidth to be atomically
// operated (so that the bandwidth can change on the fly).
sb := &switchboard{ sb := &switchboard{
session: sesh, session: sesh,
strategy: uniformSpread, switchboardConfig: config,
valve: sesh.Valve, nextConnId: 1,
randPool: sync.Pool{New: func() interface{} {
var state [32]byte
common.CryptoRandRead(state[:])
return rand.New(rand.NewChaCha8(state))
}},
} }
return sb return sb
} }
var errBrokenSwitchboard = errors.New("the switchboard is broken") var errBrokenSwitchboard = errors.New("the switchboard is broken")
func (sb *switchboard) addConn(conn net.Conn) { func (sb *switchboard) connsCount() int {
connId := atomic.AddUint32(&sb.connsCount, 1) - 1 return int(atomic.LoadUint32(&sb.numConns))
sb.conns.Store(connId, conn)
go sb.deplex(conn)
} }
// a pointer to assignedConn is passed here so that the switchboard can reassign it if that conn isn't usable func (sb *switchboard) addConn(conn net.Conn) {
func (sb *switchboard) send(data []byte, assignedConn *net.Conn) (n int, err error) { connId := atomic.AddUint32(&sb.nextConnId, 1) - 1
atomic.AddUint32(&sb.numConns, 1)
sb.conns.Store(connId, conn)
go sb.deplex(connId, conn)
}
// a pointer to connId is passed here so that the switchboard can reassign it
func (sb *switchboard) send(data []byte, connId *uint32) (n int, err error) {
writeAndRegUsage := func(conn net.Conn, d []byte) (int, error) {
n, err = conn.Write(d)
if err != nil {
sb.conns.Delete(*connId)
sb.close("failed to write to remote " + err.Error())
return n, err
}
sb.valve.AddTx(int64(n))
return n, nil
}
sb.valve.txWait(len(data)) sb.valve.txWait(len(data))
if atomic.LoadUint32(&sb.broken) == 1 { if atomic.LoadUint32(&sb.broken) == 1 || sb.connsCount() == 0 {
return 0, errBrokenSwitchboard return 0, errBrokenSwitchboard
} }
var conn net.Conn
switch sb.strategy { switch sb.strategy {
case uniformSpread: case UNIFORM_SPREAD:
conn, err = sb.pickRandConn() _, conn, err := sb.pickRandConn()
if err != nil { if err != nil {
return 0, errBrokenSwitchboard return 0, errBrokenSwitchboard
} }
n, err = conn.Write(data) return writeAndRegUsage(conn, data)
if err != nil { case FIXED_CONN_MAPPING:
sb.session.SetTerminalMsg("failed to send to remote " + err.Error()) connI, ok := sb.conns.Load(*connId)
sb.session.passiveClose() if ok {
return n, err conn := connI.(net.Conn)
} return writeAndRegUsage(conn, data)
case fixedConnMapping: } else {
// FIXME: this strategy has a tendency to cause a TLS conn socket buffer to fill up, newConnId, conn, err := sb.pickRandConn()
// which is a problem when multiple streams are mapped to the same conn, resulting
// in all such streams being blocked.
conn = *assignedConn
if conn == nil {
conn, err = sb.pickRandConn()
if err != nil { if err != nil {
sb.session.SetTerminalMsg("failed to pick a connection " + err.Error()) return 0, errBrokenSwitchboard
sb.session.passiveClose()
return 0, err
} }
*assignedConn = conn *connId = newConnId
} return writeAndRegUsage(conn, data)
n, err = conn.Write(data)
if err != nil {
sb.session.SetTerminalMsg("failed to send to remote " + err.Error())
sb.session.passiveClose()
return n, err
} }
default: default:
return 0, errors.New("unsupported traffic distribution strategy") return 0, errors.New("unsupported traffic distribution strategy")
} }
sb.valve.AddTx(int64(n))
return n, nil
} }
// returns a random conn. This function can be called concurrently. // returns a random connId
func (sb *switchboard) pickRandConn() (net.Conn, error) { func (sb *switchboard) pickRandConn() (uint32, net.Conn, error) {
if atomic.LoadUint32(&sb.broken) == 1 { connCount := sb.connsCount()
return nil, errBrokenSwitchboard if atomic.LoadUint32(&sb.broken) == 1 || connCount == 0 {
return 0, nil, errBrokenSwitchboard
} }
connsCount := atomic.LoadUint32(&sb.connsCount) // there is no guarantee that sb.conns still has the same amount of entries
if connsCount == 0 { // between the count loop and the pick loop
return nil, errBrokenSwitchboard // so if the r > len(sb.conns) at the point of range call, the last visited element is picked
var id uint32
var conn net.Conn
r := rand.Intn(connCount)
var c int
sb.conns.Range(func(connIdI, connI interface{}) bool {
if r == c {
id = connIdI.(uint32)
conn = connI.(net.Conn)
return false
}
c++
return true
})
// if len(sb.conns) is 0
if conn == nil {
return 0, nil, errBrokenSwitchboard
} }
return id, conn, nil
}
randReader := sb.randPool.Get().(*rand.Rand) func (sb *switchboard) close(terminalMsg string) {
connId := randReader.Uint32N(connsCount) atomic.StoreUint32(&sb.broken, 1)
sb.randPool.Put(randReader) if !sb.session.IsClosed() {
sb.session.SetTerminalMsg(terminalMsg)
ret, ok := sb.conns.Load(connId) sb.session.passiveClose()
if !ok {
log.Errorf("failed to get conn %d", connId)
return nil, errBrokenSwitchboard
} }
return ret.(net.Conn), nil
} }
// actively triggered by session.Close() // actively triggered by session.Close()
func (sb *switchboard) closeAll() { func (sb *switchboard) closeAll() {
if !atomic.CompareAndSwapUint32(&sb.broken, 0, 1) { sb.conns.Range(func(key, connI interface{}) bool {
return conn := connI.(net.Conn)
} conn.Close()
atomic.StoreUint32(&sb.connsCount, 0) sb.conns.Delete(key)
sb.conns.Range(func(_, conn interface{}) bool {
conn.(net.Conn).Close()
sb.conns.Delete(conn)
return true return true
}) })
} }
// deplex function costantly reads from a TCP connection // deplex function costantly reads from a TCP connection
func (sb *switchboard) deplex(conn net.Conn) { func (sb *switchboard) deplex(connId uint32, conn net.Conn) {
defer conn.Close() defer conn.Close()
buf := make([]byte, sb.session.connReceiveBufferSize) buf := make([]byte, sb.recvBufferSize)
for { for {
n, err := conn.Read(buf) n, err := conn.Read(buf)
sb.valve.rxWait(n) sb.valve.rxWait(n)
sb.valve.AddRx(int64(n)) sb.valve.AddRx(int64(n))
if err != nil { if err != nil {
log.Debugf("a connection for session %v has closed: %v", sb.session.id, err) log.Debugf("a connection for session %v has closed: %v", sb.session.id, err)
sb.session.SetTerminalMsg("a connection has dropped unexpectedly") sb.conns.Delete(connId)
sb.session.passiveClose() atomic.AddUint32(&sb.numConns, ^uint32(0))
sb.close("a connection has dropped unexpectedly")
return return
} }

View File

@ -1,14 +1,11 @@
package multiplex package multiplex
import ( import (
"github.com/cbeuw/connutil"
"math/rand" "math/rand"
"sync" "sync"
"sync/atomic"
"testing" "testing"
"time" "time"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
) )
func TestSwitchboard_Send(t *testing.T) { func TestSwitchboard_Send(t *testing.T) {
@ -16,14 +13,14 @@ func TestSwitchboard_Send(t *testing.T) {
sesh := MakeSession(0, seshConfig) sesh := MakeSession(0, seshConfig)
hole0 := connutil.Discard() hole0 := connutil.Discard()
sesh.sb.addConn(hole0) sesh.sb.addConn(hole0)
conn, err := sesh.sb.pickRandConn() connId, _, err := sesh.sb.pickRandConn()
if err != nil { if err != nil {
t.Error("failed to get a random conn", err) t.Error("failed to get a random conn", err)
return return
} }
data := make([]byte, 1000) data := make([]byte, 1000)
rand.Read(data) rand.Read(data)
_, err = sesh.sb.send(data, &conn) _, err = sesh.sb.send(data, &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -31,23 +28,23 @@ func TestSwitchboard_Send(t *testing.T) {
hole1 := connutil.Discard() hole1 := connutil.Discard()
sesh.sb.addConn(hole1) sesh.sb.addConn(hole1)
conn, err = sesh.sb.pickRandConn() connId, _, err = sesh.sb.pickRandConn()
if err != nil { if err != nil {
t.Error("failed to get a random conn", err) t.Error("failed to get a random conn", err)
return return
} }
_, err = sesh.sb.send(data, &conn) _, err = sesh.sb.send(data, &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
conn, err = sesh.sb.pickRandConn() connId, _, err = sesh.sb.pickRandConn()
if err != nil { if err != nil {
t.Error("failed to get a random conn", err) t.Error("failed to get a random conn", err)
return return
} }
_, err = sesh.sb.send(data, &conn) _, err = sesh.sb.send(data, &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -73,7 +70,7 @@ func BenchmarkSwitchboard_Send(b *testing.B) {
seshConfig := SessionConfig{} seshConfig := SessionConfig{}
sesh := MakeSession(0, seshConfig) sesh := MakeSession(0, seshConfig)
sesh.sb.addConn(hole) sesh.sb.addConn(hole)
conn, err := sesh.sb.pickRandConn() connId, _, err := sesh.sb.pickRandConn()
if err != nil { if err != nil {
b.Error("failed to get a random conn", err) b.Error("failed to get a random conn", err)
return return
@ -83,7 +80,7 @@ func BenchmarkSwitchboard_Send(b *testing.B) {
b.SetBytes(int64(len(data))) b.SetBytes(int64(len(data)))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
sesh.sb.send(data, &conn) sesh.sb.send(data, &connId)
} }
} }
@ -94,7 +91,7 @@ func TestSwitchboard_TxCredit(t *testing.T) {
sesh := MakeSession(0, seshConfig) sesh := MakeSession(0, seshConfig)
hole := connutil.Discard() hole := connutil.Discard()
sesh.sb.addConn(hole) sesh.sb.addConn(hole)
conn, err := sesh.sb.pickRandConn() connId, _, err := sesh.sb.pickRandConn()
if err != nil { if err != nil {
t.Error("failed to get a random conn", err) t.Error("failed to get a random conn", err)
return return
@ -102,10 +99,10 @@ func TestSwitchboard_TxCredit(t *testing.T) {
data := make([]byte, 1000) data := make([]byte, 1000)
rand.Read(data) rand.Read(data)
t.Run("fixed conn mapping", func(t *testing.T) { t.Run("FIXED CONN MAPPING", func(t *testing.T) {
*sesh.sb.valve.(*LimitedValve).tx = 0 *sesh.sb.valve.(*LimitedValve).tx = 0
sesh.sb.strategy = fixedConnMapping sesh.sb.strategy = FIXED_CONN_MAPPING
n, err := sesh.sb.send(data[:10], &conn) n, err := sesh.sb.send(data[:10], &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -118,10 +115,10 @@ func TestSwitchboard_TxCredit(t *testing.T) {
t.Error("tx credit didn't increase by 10") t.Error("tx credit didn't increase by 10")
} }
}) })
t.Run("uniform spread", func(t *testing.T) { t.Run("UNIFORM", func(t *testing.T) {
*sesh.sb.valve.(*LimitedValve).tx = 0 *sesh.sb.valve.(*LimitedValve).tx = 0
sesh.sb.strategy = uniformSpread sesh.sb.strategy = UNIFORM_SPREAD
n, err := sesh.sb.send(data[:10], &conn) n, err := sesh.sb.send(data[:10], &connId)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -139,7 +136,7 @@ func TestSwitchboard_TxCredit(t *testing.T) {
func TestSwitchboard_CloseOnOneDisconn(t *testing.T) { func TestSwitchboard_CloseOnOneDisconn(t *testing.T) {
var sessionKey [32]byte var sessionKey [32]byte
rand.Read(sessionKey[:]) rand.Read(sessionKey[:])
sesh := setupSesh(false, sessionKey, EncryptionMethodPlain) sesh := setupSesh(false, sessionKey, E_METHOD_PLAIN)
conn0client, conn0server := connutil.AsyncPipe() conn0client, conn0server := connutil.AsyncPipe()
sesh.AddConnection(conn0client) sesh.AddConnection(conn0client)
@ -148,11 +145,11 @@ func TestSwitchboard_CloseOnOneDisconn(t *testing.T) {
sesh.AddConnection(conn1client) sesh.AddConnection(conn1client)
conn0server.Close() conn0server.Close()
time.Sleep(500 * time.Millisecond)
assert.Eventually(t, func() bool { if !sesh.IsClosed() {
return sesh.IsClosed() t.Error("session not closed after one conn is disconnected")
}, time.Second, 10*time.Millisecond, "session not closed after one conn is disconnected") return
}
if _, err := conn1client.Write([]byte{0x00}); err == nil { if _, err := conn1client.Write([]byte{0x00}); err == nil {
t.Error("the other conn is still connected") t.Error("the other conn is still connected")
return return
@ -175,13 +172,15 @@ func TestSwitchboard_ConnsCount(t *testing.T) {
} }
wg.Wait() wg.Wait()
if atomic.LoadUint32(&sesh.sb.connsCount) != 1000 { if sesh.sb.connsCount() != 1000 {
t.Error("connsCount incorrect") t.Error("connsCount incorrect")
} }
sesh.sb.closeAll() sesh.sb.closeAll()
assert.Eventuallyf(t, func() bool { time.Sleep(500 * time.Millisecond)
return atomic.LoadUint32(&sesh.sb.connsCount) == 0 if sesh.sb.connsCount() != 0 {
}, time.Second, 10*time.Millisecond, "connsCount incorrect: %v", atomic.LoadUint32(&sesh.sb.connsCount)) t.Error("connsCount incorrect")
}
} }

View File

@ -4,11 +4,11 @@ import (
"crypto" "crypto"
"errors" "errors"
"fmt" "fmt"
"io"
"net"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
"io"
"math/rand"
"net"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -45,7 +45,8 @@ func (TLS) makeResponder(clientHelloSessionId []byte, sharedSecret [32]byte) Res
// the cert length needs to be the same for all handshakes belonging to the same session // the cert length needs to be the same for all handshakes belonging to the same session
// we can use sessionKey as a seed here to ensure consistency // we can use sessionKey as a seed here to ensure consistency
possibleCertLengths := []int{42, 27, 68, 59, 36, 44, 46} possibleCertLengths := []int{42, 27, 68, 59, 36, 44, 46}
cert := make([]byte, possibleCertLengths[common.RandInt(len(possibleCertLengths))]) rand.Seed(int64(sessionKey[0]))
cert := make([]byte, possibleCertLengths[rand.Intn(len(possibleCertLengths))])
common.RandRead(randSource, cert) common.RandRead(randSource, cert)
var nonce [12]byte var nonce [12]byte
@ -64,7 +65,7 @@ func (TLS) makeResponder(clientHelloSessionId []byte, sharedSecret [32]byte) Res
originalConn.Close() originalConn.Close()
return return
} }
preparedConn = common.NewTLSConn(originalConn) preparedConn = &common.TLSConn{Conn: originalConn}
return return
} }
return respond return respond
@ -78,13 +79,7 @@ func (TLS) unmarshalClientHello(ch *ClientHello, staticPv crypto.PrivateKey) (fr
return return
} }
var sharedSecret []byte copy(fragments.sharedSecret[:], ecdh.GenerateSharedSecret(staticPv, ephPub))
sharedSecret, err = ecdh.GenerateSharedSecret(staticPv, ephPub)
if err != nil {
return
}
copy(fragments.sharedSecret[:], sharedSecret)
var keyShare []byte var keyShare []byte
keyShare, err = parseKeyShare(ch.extensions[[2]byte{0x00, 0x33}]) keyShare, err = parseKeyShare(ch.extensions[[2]byte{0x00, 0x33}])
if err != nil { if err != nil {

View File

@ -5,7 +5,6 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
) )
@ -164,12 +163,12 @@ func parseClientHello(data []byte) (ret *ClientHello, err error) {
func composeServerHello(sessionId []byte, nonce [12]byte, encryptedSessionKeyWithTag [48]byte) []byte { func composeServerHello(sessionId []byte, nonce [12]byte, encryptedSessionKeyWithTag [48]byte) []byte {
var serverHello [11][]byte var serverHello [11][]byte
serverHello[0] = []byte{0x02} // handshake type serverHello[0] = []byte{0x02} // handshake type
serverHello[1] = []byte{0x00, 0x00, 0x76} // length 118 serverHello[1] = []byte{0x00, 0x00, 0x76} // length 77
serverHello[2] = []byte{0x03, 0x03} // server version serverHello[2] = []byte{0x03, 0x03} // server version
serverHello[3] = append(nonce[0:12], encryptedSessionKeyWithTag[0:20]...) // random 32 bytes serverHello[3] = append(nonce[0:12], encryptedSessionKeyWithTag[0:20]...) // random 32 bytes
serverHello[4] = []byte{0x20} // session id length 32 serverHello[4] = []byte{0x20} // session id length 32
serverHello[5] = sessionId // session id serverHello[5] = sessionId // session id
serverHello[6] = []byte{0x13, 0x02} // cipher suite TLS_AES_256_GCM_SHA384 serverHello[6] = []byte{0xc0, 0x30} // cipher suite TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
serverHello[7] = []byte{0x00} // compression method null serverHello[7] = []byte{0x00} // compression method null
serverHello[8] = []byte{0x00, 0x2e} // extensions length 46 serverHello[8] = []byte{0x00, 0x2e} // extensions length 46

View File

@ -1,9 +1,8 @@
package server package server
import ( import (
"sync"
"github.com/cbeuw/Cloak/internal/server/usermanager" "github.com/cbeuw/Cloak/internal/server/usermanager"
"sync"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
) )

View File

@ -3,13 +3,12 @@ package server
import ( import (
"crypto/rand" "crypto/rand"
"encoding/base64" "encoding/base64"
"io/ioutil"
"os"
"testing"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
"github.com/cbeuw/Cloak/internal/server/usermanager" "github.com/cbeuw/Cloak/internal/server/usermanager"
"io/ioutil"
"os"
"testing"
) )
func getSeshConfig(unordered bool) mux.SessionConfig { func getSeshConfig(unordered bool) mux.SessionConfig {

View File

@ -5,9 +5,8 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"time"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -51,7 +50,7 @@ func decryptClientInfo(fragments authFragments, serverTime time.Time) (info Clie
timestamp := int64(binary.BigEndian.Uint64(plaintext[29:37])) timestamp := int64(binary.BigEndian.Uint64(plaintext[29:37]))
clientTime := time.Unix(timestamp, 0) clientTime := time.Unix(timestamp, 0)
if !(clientTime.After(serverTime.Add(-timestampTolerance)) && clientTime.Before(serverTime.Add(timestampTolerance))) { if !(clientTime.After(serverTime.Truncate(TIMESTAMP_TOLERANCE)) && clientTime.Before(serverTime.Add(TIMESTAMP_TOLERANCE))) {
err = fmt.Errorf("%v: received timestamp %v", ErrTimestampOutOfWindow, timestamp) err = fmt.Errorf("%v: received timestamp %v", ErrTimestampOutOfWindow, timestamp)
return return
} }
@ -61,7 +60,6 @@ func decryptClientInfo(fragments authFragments, serverTime time.Time) (info Clie
var ErrReplay = errors.New("duplicate random") var ErrReplay = errors.New("duplicate random")
var ErrBadProxyMethod = errors.New("invalid proxy method") var ErrBadProxyMethod = errors.New("invalid proxy method")
var ErrBadDecryption = errors.New("decryption/authentication failure")
// AuthFirstPacket checks if the first packet of data is ClientHello or HTTP GET, and checks if it was from a Cloak client // AuthFirstPacket checks if the first packet of data is ClientHello or HTTP GET, and checks if it was from a Cloak client
// if it is from a Cloak client, it returns the ClientInfo with the decrypted fields. It doesn't check if the user // if it is from a Cloak client, it returns the ClientInfo with the decrypted fields. It doesn't check if the user
@ -78,10 +76,14 @@ func AuthFirstPacket(firstPacket []byte, transport Transport, sta *State) (info
return return
} }
info, err = decryptClientInfo(fragments, sta.WorldState.Now().UTC()) info, err = decryptClientInfo(fragments, sta.WorldState.Now())
if err != nil { if err != nil {
log.Debug(err) log.Debug(err)
err = fmt.Errorf("%w: %v", ErrBadDecryption, err) err = fmt.Errorf("transport %v in correct format but not Cloak: %v", transport, err)
return
}
if _, ok := sta.ProxyBook[info.ProxyMethod]; !ok {
err = ErrBadProxyMethod
return return
} }
info.Transport = transport info.Transport = transport

View File

@ -4,14 +4,13 @@ import (
"crypto" "crypto"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"testing"
"time"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh" "github.com/cbeuw/Cloak/internal/ecdh"
"testing"
"time"
) )
func TestDecryptClientInfo(t *testing.T) { func TestTouchStone(t *testing.T) {
pvBytes, _ := hex.DecodeString("10de5a3c4a4d04efafc3e06d1506363a72bd6d053baef123e6a9a79a0c04b547") pvBytes, _ := hex.DecodeString("10de5a3c4a4d04efafc3e06d1506363a72bd6d053baef123e6a9a79a0c04b547")
p, _ := ecdh.Unmarshal(pvBytes) p, _ := ecdh.Unmarshal(pvBytes)
staticPv := p.(crypto.PrivateKey) staticPv := p.(crypto.PrivateKey)
@ -50,7 +49,7 @@ func TestDecryptClientInfo(t *testing.T) {
t.Errorf("expecting no error, got %v", err) t.Errorf("expecting no error, got %v", err)
return return
} }
nineSixSixM50 := time.Unix(1565998966, 0).Add(-50) nineSixSixM50 := time.Unix(1565998966, 0).Truncate(50)
_, err = decryptClientInfo(ai, nineSixSixM50) _, err = decryptClientInfo(ai, nineSixSixM50)
if err != nil { if err != nil {
t.Errorf("expecting no error, got %v", err) t.Errorf("expecting no error, got %v", err)
@ -67,7 +66,7 @@ func TestDecryptClientInfo(t *testing.T) {
return return
} }
nineSixSixOver := time.Unix(1565998966, 0).Add(timestampTolerance + 10) nineSixSixOver := time.Unix(1565998966, 0).Add(TIMESTAMP_TOLERANCE + 10)
_, err = decryptClientInfo(ai, nineSixSixOver) _, err = decryptClientInfo(ai, nineSixSixOver)
if err == nil { if err == nil {
t.Errorf("expecting %v, got %v", ErrTimestampOutOfWindow, err) t.Errorf("expecting %v, got %v", ErrTimestampOutOfWindow, err)
@ -83,7 +82,7 @@ func TestDecryptClientInfo(t *testing.T) {
return return
} }
nineSixSixUnder := time.Unix(1565998966, 0).Add(-(timestampTolerance + 10)) nineSixSixUnder := time.Unix(1565998966, 0).Add(TIMESTAMP_TOLERANCE - 10)
_, err = decryptClientInfo(ai, nineSixSixUnder) _, err = decryptClientInfo(ai, nineSixSixUnder)
if err == nil { if err == nil {
t.Errorf("expecting %v, got %v", ErrTimestampOutOfWindow, err) t.Errorf("expecting %v, got %v", ErrTimestampOutOfWindow, err)

View File

@ -6,22 +6,19 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
"io" "io"
"net" "net"
"net/http" "net/http"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
var b64 = base64.StdEncoding.EncodeToString var b64 = base64.StdEncoding.EncodeToString
const firstPacketSize = 3000
func Serve(l net.Listener, sta *State) { func Serve(l net.Listener, sta *State) {
waitDur := [10]time.Duration{ waitDur := [10]time.Duration{
50 * time.Millisecond, 100 * time.Millisecond, 300 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 50 * time.Millisecond, 100 * time.Millisecond, 300 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second,
@ -126,7 +123,7 @@ func readFirstPacket(conn net.Conn, buf []byte, timeout time.Duration) (int, Tra
func dispatchConnection(conn net.Conn, sta *State) { func dispatchConnection(conn net.Conn, sta *State) {
var err error var err error
buf := make([]byte, firstPacketSize) buf := make([]byte, 1500)
i, transport, redirOnErr, err := readFirstPacket(conn, buf, 15*time.Second) i, transport, redirOnErr, err := readFirstPacket(conn, buf, 15*time.Second)
data := buf[:i] data := buf[:i]
@ -178,54 +175,37 @@ func dispatchConnection(conn net.Conn, sta *State) {
common.RandRead(sta.WorldState.Rand, sessionKey[:]) common.RandRead(sta.WorldState.Rand, sessionKey[:])
obfuscator, err := mux.MakeObfuscator(ci.EncryptionMethod, sessionKey) obfuscator, err := mux.MakeObfuscator(ci.EncryptionMethod, sessionKey)
if err != nil { if err != nil {
log.WithFields(log.Fields{ log.Error(err)
"remoteAddr": conn.RemoteAddr(),
"UID": b64(ci.UID),
"sessionId": ci.SessionId,
"proxyMethod": ci.ProxyMethod,
"encryptionMethod": ci.EncryptionMethod,
}).Error(err)
goWeb() goWeb()
return return
} }
seshConfig := mux.SessionConfig{ seshConfig := mux.SessionConfig{
Obfuscator: obfuscator, Obfuscator: obfuscator,
Valve: nil, Valve: nil,
Unordered: ci.Unordered, Unordered: ci.Unordered,
MsgOnWireSizeLimit: appDataMaxLength, MaxFrameSize: appDataMaxLength,
} }
// adminUID can use the server as normal with unlimited QoS credits. The adminUID is not // adminUID can use the server as normal with unlimited QoS credits. The adminUID is not
// added to the userinfo database. The distinction between going into the admin mode // added to the userinfo database. The distinction between going into the admin mode
// and normal proxy mode is that sessionID needs == 0 for admin mode // and normal proxy mode is that sessionID needs == 0 for admin mode
if len(sta.AdminUID) != 0 && bytes.Equal(ci.UID, sta.AdminUID) && ci.SessionId == 0 { if bytes.Equal(ci.UID, sta.AdminUID) && ci.SessionId == 0 {
sesh := mux.MakeSession(0, seshConfig)
preparedConn, err := finishHandshake(conn, sessionKey, sta.WorldState.Rand) preparedConn, err := finishHandshake(conn, sessionKey, sta.WorldState.Rand)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return
} }
log.Trace("finished handshake") log.Trace("finished handshake")
sesh := mux.MakeSession(0, seshConfig)
sesh.AddConnection(preparedConn) sesh.AddConnection(preparedConn)
//TODO: Router could be nil in cnc mode //TODO: Router could be nil in cnc mode
log.WithField("remoteAddr", preparedConn.RemoteAddr()).Info("New admin session") log.WithField("remoteAddr", preparedConn.RemoteAddr()).Info("New admin session")
err = http.Serve(sesh, usermanager.APIRouterOf(sta.Panel.Manager)) err = http.Serve(sesh, usermanager.APIRouterOf(sta.Panel.Manager))
// http.Serve never returns with non-nil error if err != nil {
log.Error(err) log.Error(err)
return return
} }
if _, ok := sta.ProxyBook[ci.ProxyMethod]; !ok {
log.WithFields(log.Fields{
"remoteAddr": conn.RemoteAddr(),
"UID": b64(ci.UID),
"sessionId": ci.SessionId,
"proxyMethod": ci.ProxyMethod,
"encryptionMethod": ci.EncryptionMethod,
}).Error(ErrBadProxyMethod)
goWeb()
return
} }
var user *ActiveUser var user *ActiveUser
@ -251,26 +231,30 @@ func dispatchConnection(conn net.Conn, sta *State) {
return return
} }
preparedConn, err := finishHandshake(conn, sesh.GetSessionKey(), sta.WorldState.Rand) if existing {
preparedConn, err := finishHandshake(conn, sesh.SessionKey, sta.WorldState.Rand)
if err != nil {
log.Error(err)
return
}
log.Trace("finished handshake")
sesh.AddConnection(preparedConn)
return
}
preparedConn, err := finishHandshake(conn, sessionKey, sta.WorldState.Rand)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return return
} }
log.Trace("finished handshake") log.Trace("finished handshake")
log.WithFields(log.Fields{
"UID": b64(ci.UID),
"sessionID": ci.SessionId,
}).Info("New session")
sesh.AddConnection(preparedConn) sesh.AddConnection(preparedConn)
if !existing {
// if the session was newly made, we serve connections from the session streams to the proxy server
log.WithFields(log.Fields{
"UID": b64(ci.UID),
"sessionID": ci.SessionId,
}).Info("New session")
serveSession(sesh, ci, user, sta)
}
}
func serveSession(sesh *mux.Session, ci ClientInfo, user *ActiveUser, sta *State) error {
for { for {
newStream, err := sesh.Accept() newStream, err := sesh.Accept()
if err != nil { if err != nil {
@ -281,9 +265,9 @@ func serveSession(sesh *mux.Session, ci ClientInfo, user *ActiveUser, sta *State
"reason": sesh.TerminalMsg(), "reason": sesh.TerminalMsg(),
}).Info("Session closed") }).Info("Session closed")
user.CloseSession(ci.SessionId, "") user.CloseSession(ci.SessionId, "")
return nil return
} else { } else {
log.Errorf("unhandled error on session.Accept(): %v", err) // TODO: other errors
continue continue
} }
} }
@ -292,10 +276,12 @@ func serveSession(sesh *mux.Session, ci ClientInfo, user *ActiveUser, sta *State
if err != nil { if err != nil {
log.Errorf("Failed to connect to %v: %v", ci.ProxyMethod, err) log.Errorf("Failed to connect to %v: %v", ci.ProxyMethod, err)
user.CloseSession(ci.SessionId, "Failed to connect to proxy server") user.CloseSession(ci.SessionId, "Failed to connect to proxy server")
return err continue
} }
log.Tracef("%v endpoint has been successfully connected", ci.ProxyMethod) log.Tracef("%v endpoint has been successfully connected", ci.ProxyMethod)
// if stream has nothing to send to proxy server for sta.Timeout period of time, stream will return error
newStream.(*mux.Stream).SetWriteToTimeout(sta.Timeout)
go func() { go func() {
if _, err := common.Copy(localConn, newStream); err != nil { if _, err := common.Copy(localConn, newStream); err != nil {
log.Tracef("copying stream to proxy server: %v", err) log.Tracef("copying stream to proxy server: %v", err)
@ -308,4 +294,5 @@ func serveSession(sesh *mux.Session, ci ClientInfo, user *ActiveUser, sta *State
} }
}() }()
} }
} }

View File

@ -2,13 +2,12 @@ package server
import ( import (
"encoding/hex" "encoding/hex"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
"io" "io"
"net" "net"
"testing" "testing"
"time" "time"
"github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert"
) )
type rfpReturnValue struct { type rfpReturnValue struct {

View File

@ -1,64 +0,0 @@
//go:build gofuzz
// +build gofuzz
package server
import (
"errors"
"net"
"time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/connutil"
)
type rfpReturnValue_fuzz struct {
n int
transport Transport
redirOnErr bool
err error
}
func Fuzz(data []byte) int {
var bypassUID [16]byte
var pv [32]byte
sta := &State{
BypassUID: map[[16]byte]struct{}{
bypassUID: {},
},
ProxyBook: map[string]net.Addr{
"shadowsocks": nil,
},
UsedRandom: map[[32]byte]int64{},
StaticPv: &pv,
WorldState: common.RealWorldState,
}
rfp := func(conn net.Conn, buf []byte, retChan chan<- rfpReturnValue_fuzz) {
ret := rfpReturnValue_fuzz{}
ret.n, ret.transport, ret.redirOnErr, ret.err = readFirstPacket(conn, buf, 500*time.Millisecond)
retChan <- ret
}
local, remote := connutil.AsyncPipe()
buf := make([]byte, 1500)
retChan := make(chan rfpReturnValue_fuzz)
go rfp(remote, buf, retChan)
local.Write(data)
ret := <-retChan
if ret.err != nil {
return 1
}
_, _, err := AuthFirstPacket(buf[:ret.n], ret.transport, sta)
if !errors.Is(err, ErrReplay) && !errors.Is(err, ErrBadDecryption) {
return 1
}
return 0
}

View File

@ -5,26 +5,26 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
"io/ioutil" "io/ioutil"
"net" "net"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
) )
type RawConfig struct { type RawConfig struct {
ProxyBook map[string][]string ProxyBook map[string][]string
BindAddr []string BindAddr []string
BypassUID [][]byte BypassUID [][]byte
RedirAddr string RedirAddr string
PrivateKey []byte PrivateKey []byte
AdminUID []byte AdminUID []byte
DatabasePath string DatabasePath string
KeepAlive int StreamTimeout int
CncMode bool KeepAlive int
CncMode bool
} }
// State type stores the global state of the program // State type stores the global state of the program
@ -34,6 +34,8 @@ type State struct {
WorldState common.WorldState WorldState common.WorldState
AdminUID []byte AdminUID []byte
Timeout time.Duration
//KeepAlive time.Duration
BypassUID map[[16]byte]struct{} BypassUID map[[16]byte]struct{}
StaticPv crypto.PrivateKey StaticPv crypto.PrivateKey
@ -109,7 +111,6 @@ func parseProxyBook(bookEntries map[string][]string) (map[string]net.Addr, error
return proxyBook, nil return proxyBook, nil
} }
// ParseConfig reads the config file or semicolon-separated options and parse them into a RawConfig
func ParseConfig(conf string) (raw RawConfig, err error) { func ParseConfig(conf string) (raw RawConfig, err error) {
content, errPath := ioutil.ReadFile(conf) content, errPath := ioutil.ReadFile(conf)
if errPath != nil { if errPath != nil {
@ -131,7 +132,7 @@ func ParseConfig(conf string) (raw RawConfig, err error) {
return return
} }
// InitState process the RawConfig and initialises a server State accordingly // ParseConfig parses the config (either a path to json or the json itself as argument) into a State variable
func InitState(preParse RawConfig, worldState common.WorldState) (sta *State, err error) { func InitState(preParse RawConfig, worldState common.WorldState) (sta *State, err error) {
sta = &State{ sta = &State{
BypassUID: make(map[[16]byte]struct{}), BypassUID: make(map[[16]byte]struct{}),
@ -144,18 +145,19 @@ func InitState(preParse RawConfig, worldState common.WorldState) (sta *State, er
err = errors.New("command & control mode not implemented") err = errors.New("command & control mode not implemented")
return return
} else { } else {
var manager usermanager.UserManager manager, err := usermanager.MakeLocalManager(preParse.DatabasePath, worldState)
if len(preParse.AdminUID) == 0 || preParse.DatabasePath == "" { if err != nil {
manager = &usermanager.Voidmanager{} return sta, err
} else {
manager, err = usermanager.MakeLocalManager(preParse.DatabasePath, worldState)
if err != nil {
return sta, err
}
} }
sta.Panel = MakeUserPanel(manager) sta.Panel = MakeUserPanel(manager)
} }
if preParse.StreamTimeout == 0 {
sta.Timeout = time.Duration(300) * time.Second
} else {
sta.Timeout = time.Duration(preParse.StreamTimeout) * time.Second
}
if preParse.KeepAlive <= 0 { if preParse.KeepAlive <= 0 {
sta.ProxyDialer = &net.Dialer{KeepAlive: -1} sta.ProxyDialer = &net.Dialer{KeepAlive: -1}
} else { } else {
@ -174,10 +176,6 @@ func InitState(preParse RawConfig, worldState common.WorldState) (sta *State, er
return return
} }
if len(preParse.PrivateKey) == 0 {
err = fmt.Errorf("must have a valid private key. Run `ck-server -key` to generate one")
return
}
var pv [32]byte var pv [32]byte
copy(pv[:], preParse.PrivateKey) copy(pv[:], preParse.PrivateKey)
sta.StaticPv = &pv sta.StaticPv = &pv
@ -189,10 +187,8 @@ func InitState(preParse RawConfig, worldState common.WorldState) (sta *State, er
copy(arrUID[:], UID) copy(arrUID[:], UID)
sta.BypassUID[arrUID] = struct{}{} sta.BypassUID[arrUID] = struct{}{}
} }
if len(sta.AdminUID) != 0 { copy(arrUID[:], sta.AdminUID)
copy(arrUID[:], sta.AdminUID) sta.BypassUID[arrUID] = struct{}{}
sta.BypassUID[arrUID] = struct{}{}
}
go sta.UsedRandomCleaner() go sta.UsedRandomCleaner()
return sta, nil return sta, nil
@ -206,17 +202,17 @@ func (sta *State) IsBypass(UID []byte) bool {
return exist return exist
} }
const timestampTolerance = 180 * time.Second const TIMESTAMP_TOLERANCE = 180 * time.Second
const replayCacheAgeLimit = 12 * time.Hour const CACHE_CLEAN_INTERVAL = 12 * time.Hour
// UsedRandomCleaner clears the cache of used random fields every replayCacheAgeLimit // UsedRandomCleaner clears the cache of used random fields every CACHE_CLEAN_INTERVAL
func (sta *State) UsedRandomCleaner() { func (sta *State) UsedRandomCleaner() {
for { for {
time.Sleep(replayCacheAgeLimit) time.Sleep(CACHE_CLEAN_INTERVAL)
sta.usedRandomM.Lock() sta.usedRandomM.Lock()
for key, t := range sta.UsedRandom { for key, t := range sta.UsedRandom {
if time.Unix(t, 0).Before(sta.WorldState.Now().Add(timestampTolerance)) { if time.Unix(t, 0).Before(sta.WorldState.Now().Add(TIMESTAMP_TOLERANCE)) {
delete(sta.UsedRandom, key) delete(sta.UsedRandom, key)
} }
} }

View File

@ -43,22 +43,13 @@ func TestParseRedirAddr(t *testing.T) {
t.Errorf("parsing %v error: %v", domainNoPort, err) t.Errorf("parsing %v error: %v", domainNoPort, err)
return return
} }
expHost, err := net.ResolveIPAddr("ip", "example.com")
expIPs, err := net.LookupIP("example.com")
if err != nil { if err != nil {
t.Errorf("tester error: cannot resolve example.com: %v", err) t.Errorf("tester error: cannot resolve example.com: %v", err)
return return
} }
if host.String() != expHost.String() {
contain := false t.Errorf("expected %v got %v", expHost.String(), host.String())
for _, expIP := range expIPs {
if expIP.String() == host.String() {
contain = true
}
}
if !contain {
t.Errorf("expected one of %v got %v", expIPs, host.String())
} }
if port != "" { if port != "" {
t.Errorf("port not empty when there is no port") t.Errorf("port not empty when there is no port")
@ -72,22 +63,13 @@ func TestParseRedirAddr(t *testing.T) {
t.Errorf("parsing %v error: %v", domainWPort, err) t.Errorf("parsing %v error: %v", domainWPort, err)
return return
} }
expHost, err := net.ResolveIPAddr("ip", "example.com")
expIPs, err := net.LookupIP("example.com")
if err != nil { if err != nil {
t.Errorf("tester error: cannot resolve example.com: %v", err) t.Errorf("tester error: cannot resolve example.com: %v", err)
return return
} }
if host.String() != expHost.String() {
contain := false t.Errorf("expected %v got %v", expHost.String(), host.String())
for _, expIP := range expIPs {
if expIP.String() == host.String() {
contain = true
}
}
if !contain {
t.Errorf("expected one of %v got %v", expIPs, host.String())
} }
if port != "80" { if port != "80" {
t.Errorf("wrong port: expected %v, got %v", "80", port) t.Errorf("wrong port: expected %v, got %v", "80", port)

View File

@ -2,7 +2,7 @@ swagger: '2.0'
info: info:
description: | description: |
This is the API of Cloak server This is the API of Cloak server
version: 0.0.2 version: 1.0.0
title: Cloak Server title: Cloak Server
contact: contact:
email: cbeuw.andy@gmail.com email: cbeuw.andy@gmail.com
@ -12,6 +12,8 @@ info:
# host: petstore.swagger.io # host: petstore.swagger.io
# basePath: /v2 # basePath: /v2
tags: tags:
- name: admin
description: Endpoints used by the host administrators
- name: users - name: users
description: Operations related to user controls by admin description: Operations related to user controls by admin
# schemes: # schemes:
@ -20,6 +22,7 @@ paths:
/admin/users: /admin/users:
get: get:
tags: tags:
- admin
- users - users
summary: Show all users summary: Show all users
description: Returns an array of all UserInfo description: Returns an array of all UserInfo
@ -38,6 +41,7 @@ paths:
/admin/users/{UID}: /admin/users/{UID}:
get: get:
tags: tags:
- admin
- users - users
summary: Show userinfo by UID summary: Show userinfo by UID
description: Returns a UserInfo object description: Returns a UserInfo object
@ -64,6 +68,7 @@ paths:
description: internal error description: internal error
post: post:
tags: tags:
- admin
- users - users
summary: Updates the userinfo of the specified user, if the user does not exist, then a new user is created summary: Updates the userinfo of the specified user, if the user does not exist, then a new user is created
operationId: writeUserInfo operationId: writeUserInfo
@ -95,6 +100,7 @@ paths:
description: internal error description: internal error
delete: delete:
tags: tags:
- admin
- users - users
summary: Deletes a user summary: Deletes a user
operationId: deleteUser operationId: deleteUser

View File

@ -92,8 +92,13 @@ func (ar *APIRouter) writeUserInfoHlr(w http.ResponseWriter, r *http.Request) {
return return
} }
jsonUinfo := r.FormValue("UserInfo")
if jsonUinfo == "" {
http.Error(w, "UserInfo cannot be empty", http.StatusBadRequest)
return
}
var uinfo UserInfo var uinfo UserInfo
err = json.NewDecoder(r.Body).Decode(&uinfo) err = json.Unmarshal([]byte(jsonUinfo), &uinfo)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return

View File

@ -1,214 +0,0 @@
package usermanager
import (
"bytes"
"encoding/base64"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
var mockUIDb64 = base64.URLEncoding.EncodeToString(mockUID)
func makeRouter(t *testing.T) (router *APIRouter, cleaner func()) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
cleaner = func() { os.Remove(tmpDB.Name()) }
mgr, err := MakeLocalManager(tmpDB.Name(), mockWorldState)
if err != nil {
t.Fatal(err)
}
router = APIRouterOf(mgr)
return router, cleaner
}
func TestWriteUserInfoHlr(t *testing.T) {
router, cleaner := makeRouter(t)
defer cleaner()
marshalled, err := json.Marshal(mockUserInfo)
if err != nil {
t.Fatal(err)
}
t.Run("ok", func(t *testing.T) {
req, err := http.NewRequest("POST", "/admin/users/"+mockUIDb64, bytes.NewBuffer(marshalled))
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
assert.Equalf(t, http.StatusCreated, rr.Code, "response body: %v", rr.Body)
})
t.Run("partial update", func(t *testing.T) {
req, err := http.NewRequest("POST", "/admin/users/"+mockUIDb64, bytes.NewBuffer(marshalled))
assert.NoError(t, err)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
assert.Equal(t, http.StatusCreated, rr.Code)
partialUserInfo := UserInfo{
UID: mockUID,
SessionsCap: JustInt32(10),
}
partialMarshalled, _ := json.Marshal(partialUserInfo)
req, err = http.NewRequest("POST", "/admin/users/"+mockUIDb64, bytes.NewBuffer(partialMarshalled))
assert.NoError(t, err)
router.ServeHTTP(rr, req)
assert.Equal(t, http.StatusCreated, rr.Code)
req, err = http.NewRequest("GET", "/admin/users/"+mockUIDb64, nil)
assert.NoError(t, err)
router.ServeHTTP(rr, req)
assert.Equal(t, http.StatusCreated, rr.Code)
var got UserInfo
err = json.Unmarshal(rr.Body.Bytes(), &got)
assert.NoError(t, err)
expected := mockUserInfo
expected.SessionsCap = partialUserInfo.SessionsCap
assert.EqualValues(t, expected, got)
})
t.Run("empty parameter", func(t *testing.T) {
req, err := http.NewRequest("POST", "/admin/users/", bytes.NewBuffer(marshalled))
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
assert.Equalf(t, http.StatusMethodNotAllowed, rr.Code, "response body: %v", rr.Body)
})
t.Run("UID mismatch", func(t *testing.T) {
badMock := mockUserInfo
badMock.UID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0}
badMarshal, err := json.Marshal(badMock)
if err != nil {
t.Fatal(err)
}
req, err := http.NewRequest("POST", "/admin/users/"+mockUIDb64, bytes.NewBuffer(badMarshal))
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
assert.Equalf(t, http.StatusBadRequest, rr.Code, "response body: %v", rr.Body)
})
t.Run("garbage data", func(t *testing.T) {
req, err := http.NewRequest("POST", "/admin/users/"+mockUIDb64, bytes.NewBuffer([]byte(`{"{{'{;;}}}1`)))
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
assert.Equalf(t, http.StatusBadRequest, rr.Code, "response body: %v", rr.Body)
})
t.Run("not base64", func(t *testing.T) {
req, err := http.NewRequest("POST", "/admin/users/"+"defonotbase64", bytes.NewBuffer(marshalled))
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
assert.Equalf(t, http.StatusBadRequest, rr.Code, "response body: %v", rr.Body)
})
}
func addUser(t *testing.T, router *APIRouter, user UserInfo) {
marshalled, err := json.Marshal(user)
if err != nil {
t.Fatal(err)
}
req, err := http.NewRequest("POST", "/admin/users/"+base64.URLEncoding.EncodeToString(user.UID), bytes.NewBuffer(marshalled))
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
assert.Equalf(t, http.StatusCreated, rr.Code, "response body: %v", rr.Body)
}
func TestGetUserInfoHlr(t *testing.T) {
router, cleaner := makeRouter(t)
defer cleaner()
t.Run("empty parameter", func(t *testing.T) {
assert.HTTPError(t, router.ServeHTTP, "GET", "/admin/users/", nil)
})
t.Run("non-existent", func(t *testing.T) {
assert.HTTPError(t, router.ServeHTTP, "GET", "/admin/users/"+base64.URLEncoding.EncodeToString([]byte("adsf")), nil)
})
t.Run("not base64", func(t *testing.T) {
assert.HTTPError(t, router.ServeHTTP, "GET", "/admin/users/"+"defonotbase64", nil)
})
t.Run("ok", func(t *testing.T) {
addUser(t, router, mockUserInfo)
var got UserInfo
err := json.Unmarshal([]byte(assert.HTTPBody(router.ServeHTTP, "GET", "/admin/users/"+mockUIDb64, nil)), &got)
if err != nil {
t.Fatal(err)
}
assert.EqualValues(t, mockUserInfo, got)
})
}
func TestDeleteUserHlr(t *testing.T) {
router, cleaner := makeRouter(t)
defer cleaner()
t.Run("non-existent", func(t *testing.T) {
assert.HTTPError(t, router.ServeHTTP, "DELETE", "/admin/users/"+base64.URLEncoding.EncodeToString([]byte("adsf")), nil)
})
t.Run("not base64", func(t *testing.T) {
assert.HTTPError(t, router.ServeHTTP, "DELETE", "/admin/users/"+"defonotbase64", nil)
})
t.Run("ok", func(t *testing.T) {
addUser(t, router, mockUserInfo)
assert.HTTPSuccess(t, router.ServeHTTP, "DELETE", "/admin/users/"+mockUIDb64, nil)
assert.HTTPError(t, router.ServeHTTP, "GET", "/admin/users/"+mockUIDb64, nil)
})
}
func TestListAllUsersHlr(t *testing.T) {
router, cleaner := makeRouter(t)
defer cleaner()
user1 := mockUserInfo
addUser(t, router, user1)
user2 := mockUserInfo
user2.UID = []byte{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
addUser(t, router, user2)
expected := []UserInfo{user1, user2}
var got []UserInfo
err := json.Unmarshal([]byte(assert.HTTPBody(router.ServeHTTP, "GET", "/admin/users", nil)), &got)
if err != nil {
t.Fatal(err)
}
assert.True(t, assert.Subset(t, got, expected), assert.Subset(t, expected, got))
}

View File

@ -2,7 +2,6 @@ package usermanager
import ( import (
"encoding/binary" "encoding/binary"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
@ -128,7 +127,6 @@ func (manager *localManager) UploadStatus(uploads []StatusUpdate) ([]StatusRespo
"User no longer exists", "User no longer exists",
} }
responses = append(responses, resp) responses = append(responses, resp)
continue
} }
oldUp := int64(u64(bucket.Get([]byte("UpCredit")))) oldUp := int64(u64(bucket.Get([]byte("UpCredit"))))
@ -181,20 +179,17 @@ func (manager *localManager) ListAllUsers() (infos []UserInfo, err error) {
err = tx.ForEach(func(UID []byte, bucket *bolt.Bucket) error { err = tx.ForEach(func(UID []byte, bucket *bolt.Bucket) error {
var uinfo UserInfo var uinfo UserInfo
uinfo.UID = UID uinfo.UID = UID
uinfo.SessionsCap = JustInt32(int32(u32(bucket.Get([]byte("SessionsCap"))))) uinfo.SessionsCap = int32(u32(bucket.Get([]byte("SessionsCap"))))
uinfo.UpRate = JustInt64(int64(u64(bucket.Get([]byte("UpRate"))))) uinfo.UpRate = int64(u64(bucket.Get([]byte("UpRate"))))
uinfo.DownRate = JustInt64(int64(u64(bucket.Get([]byte("DownRate"))))) uinfo.DownRate = int64(u64(bucket.Get([]byte("DownRate"))))
uinfo.UpCredit = JustInt64(int64(u64(bucket.Get([]byte("UpCredit"))))) uinfo.UpCredit = int64(u64(bucket.Get([]byte("UpCredit"))))
uinfo.DownCredit = JustInt64(int64(u64(bucket.Get([]byte("DownCredit"))))) uinfo.DownCredit = int64(u64(bucket.Get([]byte("DownCredit"))))
uinfo.ExpiryTime = JustInt64(int64(u64(bucket.Get([]byte("ExpiryTime"))))) uinfo.ExpiryTime = int64(u64(bucket.Get([]byte("ExpiryTime"))))
infos = append(infos, uinfo) infos = append(infos, uinfo)
return nil return nil
}) })
return err return err
}) })
if infos == nil {
infos = []UserInfo{}
}
return return
} }
@ -205,52 +200,40 @@ func (manager *localManager) GetUserInfo(UID []byte) (uinfo UserInfo, err error)
return ErrUserNotFound return ErrUserNotFound
} }
uinfo.UID = UID uinfo.UID = UID
uinfo.SessionsCap = JustInt32(int32(u32(bucket.Get([]byte("SessionsCap"))))) uinfo.SessionsCap = int32(u32(bucket.Get([]byte("SessionsCap"))))
uinfo.UpRate = JustInt64(int64(u64(bucket.Get([]byte("UpRate"))))) uinfo.UpRate = int64(u64(bucket.Get([]byte("UpRate"))))
uinfo.DownRate = JustInt64(int64(u64(bucket.Get([]byte("DownRate"))))) uinfo.DownRate = int64(u64(bucket.Get([]byte("DownRate"))))
uinfo.UpCredit = JustInt64(int64(u64(bucket.Get([]byte("UpCredit"))))) uinfo.UpCredit = int64(u64(bucket.Get([]byte("UpCredit"))))
uinfo.DownCredit = JustInt64(int64(u64(bucket.Get([]byte("DownCredit"))))) uinfo.DownCredit = int64(u64(bucket.Get([]byte("DownCredit"))))
uinfo.ExpiryTime = JustInt64(int64(u64(bucket.Get([]byte("ExpiryTime"))))) uinfo.ExpiryTime = int64(u64(bucket.Get([]byte("ExpiryTime"))))
return nil return nil
}) })
return return
} }
func (manager *localManager) WriteUserInfo(u UserInfo) (err error) { func (manager *localManager) WriteUserInfo(uinfo UserInfo) (err error) {
err = manager.db.Update(func(tx *bolt.Tx) error { err = manager.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(u.UID) bucket, err := tx.CreateBucketIfNotExists(uinfo.UID)
if err != nil { if err != nil {
return err return err
} }
if u.SessionsCap != nil { if err = bucket.Put([]byte("SessionsCap"), i32ToB(int32(uinfo.SessionsCap))); err != nil {
if err = bucket.Put([]byte("SessionsCap"), i32ToB(*u.SessionsCap)); err != nil { return err
return err
}
} }
if u.UpRate != nil { if err = bucket.Put([]byte("UpRate"), i64ToB(uinfo.UpRate)); err != nil {
if err = bucket.Put([]byte("UpRate"), i64ToB(*u.UpRate)); err != nil { return err
return err
}
} }
if u.DownRate != nil { if err = bucket.Put([]byte("DownRate"), i64ToB(uinfo.DownRate)); err != nil {
if err = bucket.Put([]byte("DownRate"), i64ToB(*u.DownRate)); err != nil { return err
return err
}
} }
if u.UpCredit != nil { if err = bucket.Put([]byte("UpCredit"), i64ToB(uinfo.UpCredit)); err != nil {
if err = bucket.Put([]byte("UpCredit"), i64ToB(*u.UpCredit)); err != nil { return err
return err
}
} }
if u.DownCredit != nil { if err = bucket.Put([]byte("DownCredit"), i64ToB(uinfo.DownCredit)); err != nil {
if err = bucket.Put([]byte("DownCredit"), i64ToB(*u.DownCredit)); err != nil { return err
return err
}
} }
if u.ExpiryTime != nil { if err = bucket.Put([]byte("ExpiryTime"), i64ToB(uinfo.ExpiryTime)); err != nil {
if err = bucket.Put([]byte("ExpiryTime"), i64ToB(*u.ExpiryTime)); err != nil { return err
return err
}
} }
return nil return nil
}) })

View File

@ -2,6 +2,7 @@ package usermanager
import ( import (
"encoding/binary" "encoding/binary"
"github.com/cbeuw/Cloak/internal/common"
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
"os" "os"
@ -10,63 +11,42 @@ import (
"sync" "sync"
"testing" "testing"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/stretchr/testify/assert"
) )
var mockUID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} var mockUID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
var mockWorldState = common.WorldOfTime(time.Unix(1, 0)) var mockWorldState = common.WorldOfTime(time.Unix(1, 0))
var mockUserInfo = UserInfo{ var mockUserInfo = UserInfo{
UID: mockUID, UID: mockUID,
SessionsCap: JustInt32(10), SessionsCap: 0,
UpRate: JustInt64(100), UpRate: 0,
DownRate: JustInt64(1000), DownRate: 0,
UpCredit: JustInt64(10000), UpCredit: 0,
DownCredit: JustInt64(100000), DownCredit: 0,
ExpiryTime: JustInt64(1000000), ExpiryTime: 100,
} }
func makeManager(t *testing.T) (mgr *localManager, cleaner func()) { func TestLocalManager_WriteUserInfo(t *testing.T) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info") var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
cleaner = func() { os.Remove(tmpDB.Name()) } defer os.Remove(tmpDB.Name())
mgr, err := MakeLocalManager(tmpDB.Name(), mockWorldState) mgr, err := MakeLocalManager(tmpDB.Name(), mockWorldState)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
return mgr, cleaner
}
func TestLocalManager_WriteUserInfo(t *testing.T) { err = mgr.WriteUserInfo(mockUserInfo)
mgr, cleaner := makeManager(t)
defer cleaner()
err := mgr.WriteUserInfo(mockUserInfo)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
got, err := mgr.GetUserInfo(mockUID)
assert.NoError(t, err)
assert.EqualValues(t, mockUserInfo, got)
/* Partial update */
err = mgr.WriteUserInfo(UserInfo{
UID: mockUID,
SessionsCap: JustInt32(*mockUserInfo.SessionsCap + 1),
})
assert.NoError(t, err)
expected := mockUserInfo
expected.SessionsCap = JustInt32(*mockUserInfo.SessionsCap + 1)
got, err = mgr.GetUserInfo(mockUID)
assert.NoError(t, err)
assert.EqualValues(t, expected, got)
} }
func TestLocalManager_GetUserInfo(t *testing.T) { func TestLocalManager_GetUserInfo(t *testing.T) {
mgr, cleaner := makeManager(t) var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer cleaner() defer os.Remove(tmpDB.Name())
mgr, err := MakeLocalManager(tmpDB.Name(), mockWorldState)
if err != nil {
t.Fatal(err)
}
t.Run("simple fetch", func(t *testing.T) { t.Run("simple fetch", func(t *testing.T) {
_ = mgr.WriteUserInfo(mockUserInfo) _ = mgr.WriteUserInfo(mockUserInfo)
@ -82,9 +62,9 @@ func TestLocalManager_GetUserInfo(t *testing.T) {
t.Run("update a field", func(t *testing.T) { t.Run("update a field", func(t *testing.T) {
_ = mgr.WriteUserInfo(mockUserInfo) _ = mgr.WriteUserInfo(mockUserInfo)
updatedUserInfo := mockUserInfo updatedUserInfo := mockUserInfo
updatedUserInfo.SessionsCap = JustInt32(*mockUserInfo.SessionsCap + 1) updatedUserInfo.SessionsCap = mockUserInfo.SessionsCap + 1
err := mgr.WriteUserInfo(updatedUserInfo) err = mgr.WriteUserInfo(updatedUserInfo)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -107,11 +87,15 @@ func TestLocalManager_GetUserInfo(t *testing.T) {
} }
func TestLocalManager_DeleteUser(t *testing.T) { func TestLocalManager_DeleteUser(t *testing.T) {
mgr, cleaner := makeManager(t) var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer cleaner() defer os.Remove(tmpDB.Name())
mgr, err := MakeLocalManager(tmpDB.Name(), mockWorldState)
if err != nil {
t.Fatal(err)
}
_ = mgr.WriteUserInfo(mockUserInfo) _ = mgr.WriteUserInfo(mockUserInfo)
err := mgr.DeleteUser(mockUID) err = mgr.DeleteUser(mockUID)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -122,7 +106,15 @@ func TestLocalManager_DeleteUser(t *testing.T) {
} }
} }
var validUserInfo = mockUserInfo var validUserInfo = UserInfo{
UID: mockUID,
SessionsCap: 10,
UpRate: 100,
DownRate: 1000,
UpCredit: 10000,
DownCredit: 100000,
ExpiryTime: 1000000,
}
func TestLocalManager_AuthenticateUser(t *testing.T) { func TestLocalManager_AuthenticateUser(t *testing.T) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info") var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
@ -139,7 +131,7 @@ func TestLocalManager_AuthenticateUser(t *testing.T) {
t.Error(err) t.Error(err)
} }
if upRate != *validUserInfo.UpRate || downRate != *validUserInfo.DownRate { if upRate != validUserInfo.UpRate || downRate != validUserInfo.DownRate {
t.Error("wrong up or down rate") t.Error("wrong up or down rate")
} }
}) })
@ -153,7 +145,7 @@ func TestLocalManager_AuthenticateUser(t *testing.T) {
t.Run("expired user", func(t *testing.T) { t.Run("expired user", func(t *testing.T) {
expiredUserInfo := validUserInfo expiredUserInfo := validUserInfo
expiredUserInfo.ExpiryTime = JustInt64(mockWorldState.Now().Add(-10 * time.Second).Unix()) expiredUserInfo.ExpiryTime = mockWorldState.Now().Add(-10 * time.Second).Unix()
_ = mgr.WriteUserInfo(expiredUserInfo) _ = mgr.WriteUserInfo(expiredUserInfo)
@ -165,7 +157,7 @@ func TestLocalManager_AuthenticateUser(t *testing.T) {
t.Run("no credit", func(t *testing.T) { t.Run("no credit", func(t *testing.T) {
creditlessUserInfo := validUserInfo creditlessUserInfo := validUserInfo
creditlessUserInfo.UpCredit, creditlessUserInfo.DownCredit = JustInt64(-1), JustInt64(-1) creditlessUserInfo.UpCredit, creditlessUserInfo.DownCredit = -1, -1
_ = mgr.WriteUserInfo(creditlessUserInfo) _ = mgr.WriteUserInfo(creditlessUserInfo)
@ -177,8 +169,12 @@ func TestLocalManager_AuthenticateUser(t *testing.T) {
} }
func TestLocalManager_AuthoriseNewSession(t *testing.T) { func TestLocalManager_AuthoriseNewSession(t *testing.T) {
mgr, cleaner := makeManager(t) var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer cleaner() defer os.Remove(tmpDB.Name())
mgr, err := MakeLocalManager(tmpDB.Name(), mockWorldState)
if err != nil {
t.Fatal(err)
}
t.Run("normal auth", func(t *testing.T) { t.Run("normal auth", func(t *testing.T) {
_ = mgr.WriteUserInfo(validUserInfo) _ = mgr.WriteUserInfo(validUserInfo)
@ -197,7 +193,7 @@ func TestLocalManager_AuthoriseNewSession(t *testing.T) {
t.Run("expired user", func(t *testing.T) { t.Run("expired user", func(t *testing.T) {
expiredUserInfo := validUserInfo expiredUserInfo := validUserInfo
expiredUserInfo.ExpiryTime = JustInt64(mockWorldState.Now().Add(-10 * time.Second).Unix()) expiredUserInfo.ExpiryTime = mockWorldState.Now().Add(-10 * time.Second).Unix()
_ = mgr.WriteUserInfo(expiredUserInfo) _ = mgr.WriteUserInfo(expiredUserInfo)
err := mgr.AuthoriseNewSession(expiredUserInfo.UID, AuthorisationInfo{NumExistingSessions: 0}) err := mgr.AuthoriseNewSession(expiredUserInfo.UID, AuthorisationInfo{NumExistingSessions: 0})
@ -208,7 +204,7 @@ func TestLocalManager_AuthoriseNewSession(t *testing.T) {
t.Run("too many sessions", func(t *testing.T) { t.Run("too many sessions", func(t *testing.T) {
_ = mgr.WriteUserInfo(validUserInfo) _ = mgr.WriteUserInfo(validUserInfo)
err := mgr.AuthoriseNewSession(validUserInfo.UID, AuthorisationInfo{NumExistingSessions: int(*validUserInfo.SessionsCap + 1)}) err := mgr.AuthoriseNewSession(validUserInfo.UID, AuthorisationInfo{NumExistingSessions: int(validUserInfo.SessionsCap + 1)})
if err != ErrSessionsCapReached { if err != ErrSessionsCapReached {
t.Error("session cap not reached") t.Error("session cap not reached")
} }
@ -216,9 +212,12 @@ func TestLocalManager_AuthoriseNewSession(t *testing.T) {
} }
func TestLocalManager_UploadStatus(t *testing.T) { func TestLocalManager_UploadStatus(t *testing.T) {
mgr, cleaner := makeManager(t) var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer cleaner() defer os.Remove(tmpDB.Name())
mgr, err := MakeLocalManager(tmpDB.Name(), mockWorldState)
if err != nil {
t.Fatal(err)
}
t.Run("simple update", func(t *testing.T) { t.Run("simple update", func(t *testing.T) {
_ = mgr.WriteUserInfo(validUserInfo) _ = mgr.WriteUserInfo(validUserInfo)
@ -241,10 +240,10 @@ func TestLocalManager_UploadStatus(t *testing.T) {
t.Error(err) t.Error(err)
} }
if *updatedUserInfo.UpCredit != *validUserInfo.UpCredit-update.UpUsage { if updatedUserInfo.UpCredit != validUserInfo.UpCredit-update.UpUsage {
t.Error("up usage incorrect") t.Error("up usage incorrect")
} }
if *updatedUserInfo.DownCredit != *validUserInfo.DownCredit-update.DownUsage { if updatedUserInfo.DownCredit != validUserInfo.DownCredit-update.DownUsage {
t.Error("down usage incorrect") t.Error("down usage incorrect")
} }
}) })
@ -260,7 +259,7 @@ func TestLocalManager_UploadStatus(t *testing.T) {
UID: validUserInfo.UID, UID: validUserInfo.UID,
Active: true, Active: true,
NumSession: 1, NumSession: 1,
UpUsage: *validUserInfo.UpCredit + 100, UpUsage: validUserInfo.UpCredit + 100,
DownUsage: 0, DownUsage: 0,
Timestamp: mockWorldState.Now().Unix(), Timestamp: mockWorldState.Now().Unix(),
}, },
@ -272,19 +271,19 @@ func TestLocalManager_UploadStatus(t *testing.T) {
Active: true, Active: true,
NumSession: 1, NumSession: 1,
UpUsage: 0, UpUsage: 0,
DownUsage: *validUserInfo.DownCredit + 100, DownUsage: validUserInfo.DownCredit + 100,
Timestamp: mockWorldState.Now().Unix(), Timestamp: mockWorldState.Now().Unix(),
}, },
}, },
{"expired", {"expired",
UserInfo{ UserInfo{
UID: mockUID, UID: mockUID,
SessionsCap: JustInt32(10), SessionsCap: 10,
UpRate: JustInt64(0), UpRate: 0,
DownRate: JustInt64(0), DownRate: 0,
UpCredit: JustInt64(0), UpCredit: 0,
DownCredit: JustInt64(0), DownCredit: 0,
ExpiryTime: JustInt64(-1), ExpiryTime: -1,
}, },
StatusUpdate{ StatusUpdate{
UID: mockUserInfo.UID, UID: mockUserInfo.UID,
@ -319,8 +318,12 @@ func TestLocalManager_UploadStatus(t *testing.T) {
} }
func TestLocalManager_ListAllUsers(t *testing.T) { func TestLocalManager_ListAllUsers(t *testing.T) {
mgr, cleaner := makeManager(t) var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer cleaner() defer os.Remove(tmpDB.Name())
mgr, err := MakeLocalManager(tmpDB.Name(), mockWorldState)
if err != nil {
t.Fatal(err)
}
var wg sync.WaitGroup var wg sync.WaitGroup
var users []UserInfo var users []UserInfo
@ -329,12 +332,12 @@ func TestLocalManager_ListAllUsers(t *testing.T) {
rand.Read(randUID) rand.Read(randUID)
newUser := UserInfo{ newUser := UserInfo{
UID: randUID, UID: randUID,
SessionsCap: JustInt32(rand.Int31()), SessionsCap: rand.Int31(),
UpRate: JustInt64(rand.Int63()), UpRate: rand.Int63(),
DownRate: JustInt64(rand.Int63()), DownRate: rand.Int63(),
UpCredit: JustInt64(rand.Int63()), UpCredit: rand.Int63(),
DownCredit: JustInt64(rand.Int63()), DownCredit: rand.Int63(),
ExpiryTime: JustInt64(rand.Int63()), ExpiryTime: rand.Int63(),
} }
users = append(users, newUser) users = append(users, newUser)
wg.Add(1) wg.Add(1)

View File

@ -14,23 +14,16 @@ type StatusUpdate struct {
Timestamp int64 Timestamp int64
} }
type MaybeInt32 *int32
type MaybeInt64 *int64
type UserInfo struct { type UserInfo struct {
UID []byte UID []byte
SessionsCap MaybeInt32 SessionsCap int32
UpRate MaybeInt64 UpRate int64
DownRate MaybeInt64 DownRate int64
UpCredit MaybeInt64 UpCredit int64
DownCredit MaybeInt64 DownCredit int64
ExpiryTime MaybeInt64 ExpiryTime int64
} }
func JustInt32(v int32) MaybeInt32 { return &v }
func JustInt64(v int64) MaybeInt64 { return &v }
type StatusResponse struct { type StatusResponse struct {
UID []byte UID []byte
Action int Action int
@ -47,7 +40,6 @@ const (
var ErrUserNotFound = errors.New("UID does not correspond to a user") var ErrUserNotFound = errors.New("UID does not correspond to a user")
var ErrSessionsCapReached = errors.New("Sessions cap has reached") var ErrSessionsCapReached = errors.New("Sessions cap has reached")
var ErrMangerIsVoid = errors.New("cannot perform operation with user manager as database path is not specified")
var ErrNoUpCredit = errors.New("No upload credit left") var ErrNoUpCredit = errors.New("No upload credit left")
var ErrNoDownCredit = errors.New("No download credit left") var ErrNoDownCredit = errors.New("No download credit left")

View File

@ -1,31 +0,0 @@
package usermanager
type Voidmanager struct{}
func (v *Voidmanager) AuthenticateUser(bytes []byte) (int64, int64, error) {
return 0, 0, ErrMangerIsVoid
}
func (v *Voidmanager) AuthoriseNewSession(bytes []byte, info AuthorisationInfo) error {
return ErrMangerIsVoid
}
func (v *Voidmanager) UploadStatus(updates []StatusUpdate) ([]StatusResponse, error) {
return nil, ErrMangerIsVoid
}
func (v *Voidmanager) ListAllUsers() ([]UserInfo, error) {
return []UserInfo{}, ErrMangerIsVoid
}
func (v *Voidmanager) GetUserInfo(UID []byte) (UserInfo, error) {
return UserInfo{}, ErrMangerIsVoid
}
func (v *Voidmanager) WriteUserInfo(info UserInfo) error {
return ErrMangerIsVoid
}
func (v *Voidmanager) DeleteUser(UID []byte) error {
return ErrMangerIsVoid
}

View File

@ -1,44 +0,0 @@
package usermanager
import (
"testing"
"github.com/stretchr/testify/assert"
)
var v = &Voidmanager{}
func Test_Voidmanager_AuthenticateUser(t *testing.T) {
_, _, err := v.AuthenticateUser([]byte{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_AuthoriseNewSession(t *testing.T) {
err := v.AuthoriseNewSession([]byte{}, AuthorisationInfo{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_DeleteUser(t *testing.T) {
err := v.DeleteUser([]byte{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_GetUserInfo(t *testing.T) {
_, err := v.GetUserInfo([]byte{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_ListAllUsers(t *testing.T) {
_, err := v.ListAllUsers()
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_UploadStatus(t *testing.T) {
_, err := v.UploadStatus([]StatusUpdate{})
assert.Equal(t, ErrMangerIsVoid, err)
}
func Test_Voidmanager_WriteUserInfo(t *testing.T) {
err := v.WriteUserInfo(UserInfo{})
assert.Equal(t, ErrMangerIsVoid, err)
}

View File

@ -2,12 +2,11 @@ package server
import ( import (
"encoding/base64" "encoding/base64"
"github.com/cbeuw/Cloak/internal/server/usermanager"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/cbeuw/Cloak/internal/server/usermanager"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -186,9 +185,6 @@ func (panel *userPanel) commitUpdate() error {
panel.usageUpdateQueue = make(map[[16]byte]*usagePair) panel.usageUpdateQueue = make(map[[16]byte]*usagePair)
panel.usageUpdateQueueM.Unlock() panel.usageUpdateQueueM.Unlock()
if len(statuses) == 0 {
return nil
}
responses, err := panel.Manager.UploadStatus(statuses) responses, err := panel.Manager.UploadStatus(statuses)
if err != nil { if err != nil {
return err return err

View File

@ -2,13 +2,12 @@ package server
import ( import (
"encoding/base64" "encoding/base64"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
"io/ioutil" "io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/server/usermanager"
) )
func TestUserPanel_BypassUser(t *testing.T) { func TestUserPanel_BypassUser(t *testing.T) {
@ -67,12 +66,12 @@ var mockUID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
var mockWorldState = common.WorldOfTime(time.Unix(1, 0)) var mockWorldState = common.WorldOfTime(time.Unix(1, 0))
var validUserInfo = usermanager.UserInfo{ var validUserInfo = usermanager.UserInfo{
UID: mockUID, UID: mockUID,
SessionsCap: usermanager.JustInt32(10), SessionsCap: 10,
UpRate: usermanager.JustInt64(100), UpRate: 100,
DownRate: usermanager.JustInt64(1000), DownRate: 1000,
UpCredit: usermanager.JustInt64(10000), UpCredit: 10000,
DownCredit: usermanager.JustInt64(100000), DownCredit: 100000,
ExpiryTime: usermanager.JustInt64(1000000), ExpiryTime: 1000000,
} }
func TestUserPanel_GetUser(t *testing.T) { func TestUserPanel_GetUser(t *testing.T) {
@ -139,10 +138,10 @@ func TestUserPanel_UpdateUsageQueue(t *testing.T) {
} }
updatedUinfo, _ := mgr.GetUserInfo(validUserInfo.UID) updatedUinfo, _ := mgr.GetUserInfo(validUserInfo.UID)
if *updatedUinfo.DownCredit != *validUserInfo.DownCredit-1 { if updatedUinfo.DownCredit != validUserInfo.DownCredit-1 {
t.Error("down credit incorrect update") t.Error("down credit incorrect update")
} }
if *updatedUinfo.UpCredit != *validUserInfo.UpCredit-2 { if updatedUinfo.UpCredit != validUserInfo.UpCredit-2 {
t.Error("up credit incorrect update") t.Error("up credit incorrect update")
} }
@ -156,10 +155,10 @@ func TestUserPanel_UpdateUsageQueue(t *testing.T) {
} }
updatedUinfo, _ = mgr.GetUserInfo(validUserInfo.UID) updatedUinfo, _ = mgr.GetUserInfo(validUserInfo.UID)
if *updatedUinfo.DownCredit != *validUserInfo.DownCredit-(1+3) { if updatedUinfo.DownCredit != validUserInfo.DownCredit-(1+3) {
t.Error("down credit incorrect update") t.Error("down credit incorrect update")
} }
if *updatedUinfo.UpCredit != *validUserInfo.UpCredit-(2+4) { if updatedUinfo.UpCredit != validUserInfo.UpCredit-(2+4) {
t.Error("up credit incorrect update") t.Error("up credit incorrect update")
} }
}) })
@ -171,7 +170,7 @@ func TestUserPanel_UpdateUsageQueue(t *testing.T) {
t.Error(err) t.Error(err)
} }
user.valve.AddTx(*validUserInfo.DownCredit + 100) user.valve.AddTx(validUserInfo.DownCredit + 100)
panel.updateUsageQueue() panel.updateUsageQueue()
err = panel.commitUpdate() err = panel.commitUpdate()
if err != nil { if err != nil {
@ -183,7 +182,7 @@ func TestUserPanel_UpdateUsageQueue(t *testing.T) {
} }
updatedUinfo, _ := mgr.GetUserInfo(validUserInfo.UID) updatedUinfo, _ := mgr.GetUserInfo(validUserInfo.UID)
if *updatedUinfo.DownCredit != -100 { if updatedUinfo.DownCredit != -100 {
t.Error("down credit not updated correctly after the user has been terminated") t.Error("down credit not updated correctly after the user has been terminated")
} }
}) })

View File

@ -7,12 +7,11 @@ import (
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh"
"io" "io"
"net" "net"
"net/http" "net/http"
"github.com/cbeuw/Cloak/internal/common"
"github.com/cbeuw/Cloak/internal/ecdh"
) )
type WebSocket struct{} type WebSocket struct{}
@ -85,13 +84,7 @@ func (WebSocket) unmarshalHidden(hidden []byte, staticPv crypto.PrivateKey) (fra
return return
} }
var sharedSecret []byte copy(fragments.sharedSecret[:], ecdh.GenerateSharedSecret(staticPv, ephPub))
sharedSecret, err = ecdh.GenerateSharedSecret(staticPv, ephPub)
if err != nil {
return
}
copy(fragments.sharedSecret[:], sharedSecret)
if len(hidden[32:]) != 64 { if len(hidden[32:]) != 64 {
err = fmt.Errorf("%v: %v", ErrCiphertextLength, len(hidden[32:])) err = fmt.Errorf("%v: %v", ErrCiphertextLength, len(hidden[32:]))

View File

@ -2,11 +2,10 @@ package server
import ( import (
"errors" "errors"
"net"
"net/http"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
"github.com/gorilla/websocket" "github.com/gorilla/websocket"
"net"
"net/http"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )

View File

@ -2,9 +2,8 @@ package server
import ( import (
"bytes" "bytes"
"testing"
"github.com/cbeuw/connutil" "github.com/cbeuw/connutil"
"testing"
) )
func TestFirstBuffedConn_Read(t *testing.T) { func TestFirstBuffedConn_Read(t *testing.T) {

View File

@ -3,21 +3,20 @@ package test
import ( import (
"bytes" "bytes"
"encoding/base64" "encoding/base64"
"encoding/binary"
"fmt" "fmt"
"io"
"math/rand"
"net"
"sync"
"testing"
"time"
"github.com/cbeuw/Cloak/internal/client" "github.com/cbeuw/Cloak/internal/client"
"github.com/cbeuw/Cloak/internal/common" "github.com/cbeuw/Cloak/internal/common"
mux "github.com/cbeuw/Cloak/internal/multiplex" mux "github.com/cbeuw/Cloak/internal/multiplex"
"github.com/cbeuw/Cloak/internal/server" "github.com/cbeuw/Cloak/internal/server"
"github.com/cbeuw/connutil" "github.com/cbeuw/connutil"
"github.com/stretchr/testify/assert" "io"
"io/ioutil"
"math/rand"
"net"
"os"
"sync"
"testing"
"time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -31,14 +30,15 @@ func serveTCPEcho(l net.Listener) {
log.Error(err) log.Error(err)
return return
} }
go func(conn net.Conn) { go func() {
conn := conn
_, err := io.Copy(conn, conn) _, err := io.Copy(conn, conn)
if err != nil { if err != nil {
conn.Close() conn.Close()
log.Error(err) log.Error(err)
return return
} }
}(conn) }()
} }
} }
@ -50,7 +50,8 @@ func serveUDPEcho(listener *connutil.PipeListener) {
return return
} }
const bufSize = 32 * 1024 const bufSize = 32 * 1024
go func(conn net.PacketConn) { go func() {
conn := conn
defer conn.Close() defer conn.Close()
buf := make([]byte, bufSize) buf := make([]byte, bufSize)
for { for {
@ -69,7 +70,7 @@ func serveUDPEcho(listener *connutil.PipeListener) {
return return
} }
} }
}(conn) }()
} }
} }
@ -77,70 +78,40 @@ var bypassUID = [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
var publicKey, _ = base64.StdEncoding.DecodeString("7f7TuKrs264VNSgMno8PkDlyhGhVuOSR8JHLE6H4Ljc=") var publicKey, _ = base64.StdEncoding.DecodeString("7f7TuKrs264VNSgMno8PkDlyhGhVuOSR8JHLE6H4Ljc=")
var privateKey, _ = base64.StdEncoding.DecodeString("SMWeC6VuZF8S/id65VuFQFlfa7hTEJBpL6wWhqPP100=") var privateKey, _ = base64.StdEncoding.DecodeString("SMWeC6VuZF8S/id65VuFQFlfa7hTEJBpL6wWhqPP100=")
var basicUDPConfig = client.RawConfig{ func basicClientConfigs(state common.WorldState) (client.LocalConnConfig, client.RemoteConnConfig, client.AuthInfo) {
ServerName: "www.example.com", var clientConfig = client.RawConfig{
ProxyMethod: "openvpn", ServerName: "www.example.com",
EncryptionMethod: "plain", ProxyMethod: "tcp",
UID: bypassUID[:], EncryptionMethod: "plain",
PublicKey: publicKey, UID: bypassUID[:],
NumConn: 4, PublicKey: publicKey,
UDP: true, NumConn: 4,
Transport: "direct", UDP: false,
RemoteHost: "fake.com", Transport: "direct",
RemotePort: "9999", RemoteHost: "fake.com",
LocalHost: "127.0.0.1", RemotePort: "9999",
LocalPort: "9999", LocalHost: "127.0.0.1",
} LocalPort: "9999",
}
var basicTCPConfig = client.RawConfig{ lcl, rmt, auth, err := clientConfig.SplitConfigs(state)
ServerName: "www.example.com",
ProxyMethod: "shadowsocks",
EncryptionMethod: "plain",
UID: bypassUID[:],
PublicKey: publicKey,
NumConn: 4,
UDP: false,
Transport: "direct",
RemoteHost: "fake.com",
RemotePort: "9999",
LocalHost: "127.0.0.1",
LocalPort: "9999",
BrowserSig: "firefox",
}
var singleplexTCPConfig = client.RawConfig{
ServerName: "www.example.com",
ProxyMethod: "shadowsocks",
EncryptionMethod: "plain",
UID: bypassUID[:],
PublicKey: publicKey,
NumConn: 0,
UDP: false,
Transport: "direct",
RemoteHost: "fake.com",
RemotePort: "9999",
LocalHost: "127.0.0.1",
LocalPort: "9999",
BrowserSig: "safari",
}
func generateClientConfigs(rawConfig client.RawConfig, state common.WorldState) (client.LocalConnConfig, client.RemoteConnConfig, client.AuthInfo) {
lcl, rmt, auth, err := rawConfig.ProcessRawConfig(state)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
return lcl, rmt, auth return lcl, rmt, auth
} }
func basicServerState(ws common.WorldState) *server.State { func basicServerState(ws common.WorldState, db *os.File) *server.State {
var serverConfig = server.RawConfig{ var serverConfig = server.RawConfig{
ProxyBook: map[string][]string{"shadowsocks": {"tcp", "fake.com:9999"}, "openvpn": {"udp", "fake.com:9999"}}, ProxyBook: map[string][]string{"tcp": {"tcp", "fake.com:9999"}, "udp": {"udp", "fake.com:9999"}},
BindAddr: []string{"fake.com:9999"}, BindAddr: []string{"fake.com:9999"},
BypassUID: [][]byte{bypassUID[:]}, BypassUID: [][]byte{bypassUID[:]},
RedirAddr: "fake.com:9999", RedirAddr: "fake.com:9999",
PrivateKey: privateKey, PrivateKey: privateKey,
KeepAlive: 15, AdminUID: nil,
CncMode: false, DatabasePath: db.Name(),
StreamTimeout: 300,
KeepAlive: 15,
CncMode: false,
} }
state, err := server.InitState(serverConfig, ws) state, err := server.InitState(serverConfig, ws)
if err != nil { if err != nil {
@ -162,36 +133,15 @@ func (m *mockUDPDialer) Dial(network, address string) (net.Conn, error) {
} }
func establishSession(lcc client.LocalConnConfig, rcc client.RemoteConnConfig, ai client.AuthInfo, serverState *server.State) (common.Dialer, *connutil.PipeListener, common.Dialer, net.Listener, error) { func establishSession(lcc client.LocalConnConfig, rcc client.RemoteConnConfig, ai client.AuthInfo, serverState *server.State) (common.Dialer, *connutil.PipeListener, common.Dialer, net.Listener, error) {
// redirecting web server // transport
// ^ ckClientDialer, ckServerListener := connutil.DialerListener(10 * 1024)
// |
// |
// redirFromCkServerL
// |
// |
// proxy client ----proxyToCkClientD----> ck-client ------> ck-server ----proxyFromCkServerL----> proxy server
// ^
// |
// |
// netToCkServerD
// |
// |
// whatever connection initiator (including a proper ck-client)
netToCkServerD, ckServerListener := connutil.DialerListener(10 * 1024)
clientSeshMaker := func() *mux.Session { clientSeshMaker := func() *mux.Session {
ai := ai return client.MakeSession(rcc, ai, ckClientDialer, false)
quad := make([]byte, 4)
common.RandRead(ai.WorldState.Rand, quad)
ai.SessionId = binary.BigEndian.Uint32(quad)
return client.MakeSession(rcc, ai, netToCkServerD)
} }
useSessionPerConnection := rcc.NumConn == 0
var proxyToCkClientD common.Dialer var proxyToCkClientD common.Dialer
if ai.Unordered { if ai.Unordered {
// We can only "dial" a single UDP connection as we can't send packets from different context
// to a single UDP listener
addrCh := make(chan *net.UDPAddr, 1) addrCh := make(chan *net.UDPAddr, 1)
mDialer := &mockUDPDialer{ mDialer := &mockUDPDialer{
addrCh: addrCh, addrCh: addrCh,
@ -202,73 +152,72 @@ func establishSession(lcc client.LocalConnConfig, rcc client.RemoteConnConfig, a
addrCh <- conn.LocalAddr().(*net.UDPAddr) addrCh <- conn.LocalAddr().(*net.UDPAddr)
return conn, err return conn, err
} }
go client.RouteUDP(acceptor, lcc.Timeout, rcc.Singleplex, clientSeshMaker) go client.RouteUDP(acceptor, lcc.Timeout, clientSeshMaker, useSessionPerConnection)
proxyToCkClientD = mDialer proxyToCkClientD = mDialer
} else { } else {
var proxyToCkClientL *connutil.PipeListener var proxyToCkClientL *connutil.PipeListener
proxyToCkClientD, proxyToCkClientL = connutil.DialerListener(10 * 1024) proxyToCkClientD, proxyToCkClientL = connutil.DialerListener(10 * 1024)
go client.RouteTCP(proxyToCkClientL, lcc.Timeout, rcc.Singleplex, clientSeshMaker) go client.RouteTCP(proxyToCkClientL, lcc.Timeout, clientSeshMaker, useSessionPerConnection)
} }
// set up server // set up server
ckServerToProxyD, proxyFromCkServerL := connutil.DialerListener(10 * 1024) ckServerToProxyD, ckServerToProxyL := connutil.DialerListener(10 * 1024)
ckServerToWebD, redirFromCkServerL := connutil.DialerListener(10 * 1024) ckServerToWebD, ckServerToWebL := connutil.DialerListener(10 * 1024)
serverState.ProxyDialer = ckServerToProxyD serverState.ProxyDialer = ckServerToProxyD
serverState.RedirDialer = ckServerToWebD serverState.RedirDialer = ckServerToWebD
go server.Serve(ckServerListener, serverState) go server.Serve(ckServerListener, serverState)
return proxyToCkClientD, proxyFromCkServerL, netToCkServerD, redirFromCkServerL, nil return proxyToCkClientD, ckServerToProxyL, ckClientDialer, ckServerToWebL, nil
} }
func runEchoTest(t *testing.T, conns []net.Conn, msgLen int) { func runEchoTest(t *testing.T, conns []net.Conn, maxMsgLen int) {
var wg sync.WaitGroup var wg sync.WaitGroup
for _, conn := range conns { for _, conn := range conns {
wg.Add(1) wg.Add(1)
go func(conn net.Conn) { go func(conn net.Conn) {
defer wg.Done() testDataLen := rand.Intn(maxMsgLen)
testData := make([]byte, testDataLen)
testData := make([]byte, msgLen)
rand.Read(testData) rand.Read(testData)
// we cannot call t.Fatalf in concurrent contexts
n, err := conn.Write(testData) n, err := conn.Write(testData)
if n != msgLen { if n != testDataLen {
t.Errorf("written only %v, err %v", n, err) t.Fatalf("written only %v, err %v", n, err)
return
} }
recvBuf := make([]byte, msgLen) recvBuf := make([]byte, testDataLen)
_, err = io.ReadFull(conn, recvBuf) _, err = io.ReadFull(conn, recvBuf)
if err != nil { if err != nil {
t.Errorf("failed to read back: %v", err) t.Fatalf("failed to read back: %v", err)
return
} }
if !bytes.Equal(testData, recvBuf) { if !bytes.Equal(testData, recvBuf) {
t.Errorf("echoed data not correct") t.Fatalf("echoed data not correct")
return
} }
wg.Done()
}(conn) }(conn)
} }
wg.Wait() wg.Wait()
} }
func TestUDP(t *testing.T) { func TestUDP(t *testing.T) {
log.SetLevel(log.ErrorLevel) var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
log.SetLevel(log.TraceLevel)
worldState := common.WorldOfTime(time.Unix(10, 0)) worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := generateClientConfigs(basicUDPConfig, worldState) lcc, rcc, ai := basicClientConfigs(worldState)
sta := basicServerState(worldState) ai.ProxyMethod = "udp"
ai.Unordered = true
sta := basicServerState(worldState, tmpDB)
proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta) pxyClientD, pxyServerL, _, _, err := establishSession(lcc, rcc, ai, sta)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
t.Run("simple send", func(t *testing.T) { t.Run("simple send", func(t *testing.T) {
pxyClientConn, err := proxyToCkClientD.Dial("udp", "") pxyClientConn, err := pxyClientD.Dial("udp", "")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -284,7 +233,7 @@ func TestUDP(t *testing.T) {
t.Error(err) t.Error(err)
} }
pxyServerConn, err := proxyFromCkServerL.ListenPacket("", "") pxyServerConn, err := pxyServerL.ListenPacket("", "")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -298,88 +247,29 @@ func TestUDP(t *testing.T) {
} }
}) })
const echoMsgLen = 1024
t.Run("user echo", func(t *testing.T) { t.Run("user echo", func(t *testing.T) {
go serveUDPEcho(proxyFromCkServerL) go serveUDPEcho(pxyServerL)
var conn [1]net.Conn var conn [1]net.Conn
conn[0], err = proxyToCkClientD.Dial("udp", "") conn[0], err = pxyClientD.Dial("udp", "")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
runEchoTest(t, conn[:], echoMsgLen) runEchoTest(t, conn[:], 1024)
}) })
} }
func TestTCPSingleplex(t *testing.T) { func TestTCP(t *testing.T) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
log.SetLevel(log.ErrorLevel) log.SetLevel(log.ErrorLevel)
worldState := common.WorldOfTime(time.Unix(10, 0)) worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := generateClientConfigs(singleplexTCPConfig, worldState) lcc, rcc, ai := basicClientConfigs(worldState)
sta := basicServerState(worldState) sta := basicServerState(worldState, tmpDB)
proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta)
if err != nil {
t.Fatal(err)
}
const echoMsgLen = 1 << 16 pxyClientD, pxyServerL, dialerToCkServer, rdirServerL, err := establishSession(lcc, rcc, ai, sta)
go serveTCPEcho(proxyFromCkServerL)
proxyConn1, err := proxyToCkClientD.Dial("", "")
if err != nil {
t.Fatal(err)
}
runEchoTest(t, []net.Conn{proxyConn1}, echoMsgLen)
user, err := sta.Panel.GetUser(ai.UID[:])
if err != nil {
t.Fatalf("failed to fetch user: %v", err)
}
if user.NumSession() != 1 {
t.Error("no session were made on first connection establishment")
}
proxyConn2, err := proxyToCkClientD.Dial("", "")
if err != nil {
t.Fatal(err)
}
runEchoTest(t, []net.Conn{proxyConn2}, echoMsgLen)
if user.NumSession() != 2 {
t.Error("no extra session were made on second connection establishment")
}
// Both conns should work
runEchoTest(t, []net.Conn{proxyConn1, proxyConn2}, echoMsgLen)
proxyConn1.Close()
assert.Eventually(t, func() bool {
return user.NumSession() == 1
}, time.Second, 10*time.Millisecond, "first session was not closed on connection close")
// conn2 should still work
runEchoTest(t, []net.Conn{proxyConn2}, echoMsgLen)
var conns [numConns]net.Conn
for i := 0; i < numConns; i++ {
conns[i], err = proxyToCkClientD.Dial("", "")
if err != nil {
t.Fatal(err)
}
}
runEchoTest(t, conns[:], echoMsgLen)
}
func TestTCPMultiplex(t *testing.T) {
log.SetLevel(log.ErrorLevel)
worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := generateClientConfigs(basicTCPConfig, worldState)
sta := basicServerState(worldState)
proxyToCkClientD, proxyFromCkServerL, netToCkServerD, redirFromCkServerL, err := establishSession(lcc, rcc, ai, sta)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -390,8 +280,8 @@ func TestTCPMultiplex(t *testing.T) {
writeData := make([]byte, dataLen) writeData := make([]byte, dataLen)
rand.Read(writeData) rand.Read(writeData)
t.Run(fmt.Sprintf("data length %v", dataLen), func(t *testing.T) { t.Run(fmt.Sprintf("data length %v", dataLen), func(t *testing.T) {
go serveTCPEcho(proxyFromCkServerL) go serveTCPEcho(pxyServerL)
conn, err := proxyToCkClientD.Dial("", "") conn, err := pxyClientD.Dial("", "")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -416,158 +306,160 @@ func TestTCPMultiplex(t *testing.T) {
} }
}) })
const echoMsgLen = 16384
t.Run("user echo", func(t *testing.T) { t.Run("user echo", func(t *testing.T) {
go serveTCPEcho(proxyFromCkServerL) go serveTCPEcho(pxyServerL)
var conns [numConns]net.Conn var conns [numConns]net.Conn
for i := 0; i < numConns; i++ { for i := 0; i < numConns; i++ {
conns[i], err = proxyToCkClientD.Dial("", "") conns[i], err = pxyClientD.Dial("", "")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
} }
runEchoTest(t, conns[:], echoMsgLen) runEchoTest(t, conns[:], 65536)
}) })
t.Run("redir echo", func(t *testing.T) { t.Run("redir echo", func(t *testing.T) {
go serveTCPEcho(redirFromCkServerL) go serveTCPEcho(rdirServerL)
var conns [numConns]net.Conn var conns [numConns]net.Conn
for i := 0; i < numConns; i++ { for i := 0; i < numConns; i++ {
conns[i], err = netToCkServerD.Dial("", "") conns[i], err = dialerToCkServer.Dial("", "")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
} }
runEchoTest(t, conns[:], echoMsgLen) runEchoTest(t, conns[:], 65536)
}) })
} }
func TestClosingStreamsFromProxy(t *testing.T) { func TestClosingStreamsFromProxy(t *testing.T) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
log.SetLevel(log.ErrorLevel) log.SetLevel(log.ErrorLevel)
worldState := common.WorldOfTime(time.Unix(10, 0)) worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := basicClientConfigs(worldState)
for clientConfigName, clientConfig := range map[string]client.RawConfig{"basic": basicTCPConfig, "singleplex": singleplexTCPConfig} { sta := basicServerState(worldState, tmpDB)
clientConfig := clientConfig pxyClientD, pxyServerL, _, _, err := establishSession(lcc, rcc, ai, sta)
clientConfigName := clientConfigName if err != nil {
t.Run(clientConfigName, func(t *testing.T) { t.Fatal(err)
lcc, rcc, ai := generateClientConfigs(clientConfig, worldState)
sta := basicServerState(worldState)
proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta)
if err != nil {
t.Fatal(err)
}
t.Run("closing from server", func(t *testing.T) {
clientConn, _ := proxyToCkClientD.Dial("", "")
clientConn.Write(make([]byte, 16))
serverConn, _ := proxyFromCkServerL.Accept()
serverConn.Close()
assert.Eventually(t, func() bool {
_, err := clientConn.Read(make([]byte, 16))
return err != nil
}, time.Second, 10*time.Millisecond, "closing stream on server side is not reflected to the client")
})
t.Run("closing from client", func(t *testing.T) {
// closing stream on client side
clientConn, _ := proxyToCkClientD.Dial("", "")
clientConn.Write(make([]byte, 16))
serverConn, _ := proxyFromCkServerL.Accept()
clientConn.Close()
assert.Eventually(t, func() bool {
_, err := serverConn.Read(make([]byte, 16))
return err != nil
}, time.Second, 10*time.Millisecond, "closing stream on client side is not reflected to the server")
})
t.Run("send then close", func(t *testing.T) {
testData := make([]byte, 24*1024)
rand.Read(testData)
clientConn, _ := proxyToCkClientD.Dial("", "")
go func() {
clientConn.Write(testData)
// it takes time for this written data to be copied asynchronously
// into ck-server's domain. If the pipe is closed before that, read
// by ck-client in RouteTCP will fail as we have closed it.
time.Sleep(700 * time.Millisecond)
clientConn.Close()
}()
readBuf := make([]byte, len(testData))
serverConn, err := proxyFromCkServerL.Accept()
if err != nil {
t.Errorf("failed to accept a connection delievering data sent before closing: %v", err)
}
_, err = io.ReadFull(serverConn, readBuf)
if err != nil {
t.Errorf("failed to read data sent before closing: %v", err)
}
})
})
} }
t.Run("closing from server", func(t *testing.T) {
clientConn, _ := pxyClientD.Dial("", "")
clientConn.Write(make([]byte, 16))
serverConn, _ := pxyServerL.Accept()
serverConn.Close()
time.Sleep(500 * time.Millisecond)
if _, err := clientConn.Read(make([]byte, 16)); err == nil {
t.Errorf("closing stream on server side is not reflected to the client: %v", err)
}
})
t.Run("closing from client", func(t *testing.T) {
// closing stream on client side
clientConn, _ := pxyClientD.Dial("", "")
clientConn.Write(make([]byte, 16))
serverConn, _ := pxyServerL.Accept()
clientConn.Close()
time.Sleep(500 * time.Millisecond)
if _, err := serverConn.Read(make([]byte, 16)); err == nil {
t.Errorf("closing stream on client side is not reflected to the server: %v", err)
}
})
t.Run("send then close", func(t *testing.T) {
testData := make([]byte, 24*1024)
rand.Read(testData)
clientConn, _ := pxyClientD.Dial("", "")
go func() {
clientConn.Write(testData)
// TODO: this is time dependent. It could be due to the time it took for this
// connutil.StreamPipe's Close to be reflected on the copy function, instead of inherent bad sync
// in multiplexer
time.Sleep(10 * time.Millisecond)
clientConn.Close()
}()
readBuf := make([]byte, len(testData))
serverConn, _ := pxyServerL.Accept()
_, err := io.ReadFull(serverConn, readBuf)
if err != nil {
t.Errorf("failed to read data sent before closing: %v", err)
}
})
} }
func BenchmarkIntegration(b *testing.B) { func BenchmarkThroughput(b *testing.B) {
var tmpDB, _ = ioutil.TempFile("", "ck_user_info")
defer os.Remove(tmpDB.Name())
log.SetLevel(log.ErrorLevel) log.SetLevel(log.ErrorLevel)
worldState := common.WorldOfTime(time.Unix(10, 0)) worldState := common.WorldOfTime(time.Unix(10, 0))
lcc, rcc, ai := generateClientConfigs(basicTCPConfig, worldState) lcc, rcc, ai := basicClientConfigs(worldState)
sta := basicServerState(worldState) sta := basicServerState(worldState, tmpDB)
const bufSize = 16 * 1024 const bufSize = 16 * 1024
encryptionMethods := map[string]byte{ encryptionMethods := map[string]byte{
"plain": mux.EncryptionMethodPlain, "plain": mux.E_METHOD_PLAIN,
"chacha20-poly1305": mux.EncryptionMethodChaha20Poly1305, "chacha20-poly1305": mux.E_METHOD_CHACHA20_POLY1305,
"aes-256-gcm": mux.EncryptionMethodAES256GCM, "aes-gcm": mux.E_METHOD_AES_GCM,
"aes-128-gcm": mux.EncryptionMethodAES128GCM,
} }
for name, method := range encryptionMethods { for name, method := range encryptionMethods {
b.Run(name, func(b *testing.B) { b.Run(name, func(b *testing.B) {
ai.EncryptionMethod = method ai.EncryptionMethod = method
proxyToCkClientD, proxyFromCkServerL, _, _, err := establishSession(lcc, rcc, ai, sta) pxyClientD, pxyServerL, _, _, err := establishSession(lcc, rcc, ai, sta)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
b.Run("single stream bandwidth", func(b *testing.B) { b.Run("single conn", func(b *testing.B) {
more := make(chan int, 10) more := make(chan int, 10)
go func() { go func() {
// sender
writeBuf := make([]byte, bufSize+100) writeBuf := make([]byte, bufSize+100)
serverConn, _ := proxyFromCkServerL.Accept() serverConn, _ := pxyServerL.Accept()
for { for {
serverConn.Write(writeBuf) serverConn.Write(writeBuf)
<-more <-more
} }
}() }()
// receiver clientConn, _ := pxyClientD.Dial("", "")
clientConn, _ := proxyToCkClientD.Dial("", "")
readBuf := make([]byte, bufSize) readBuf := make([]byte, bufSize)
clientConn.Write([]byte{1}) // to make server accept clientConn.Write([]byte{1}) // to make server accept
b.SetBytes(bufSize) b.SetBytes(bufSize)
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
io.ReadFull(clientConn, readBuf) io.ReadFull(clientConn, readBuf)
// ask for more
more <- 0 more <- 0
} }
}) })
b.Run("single stream latency", func(b *testing.B) { /*
clientConn, _ := proxyToCkClientD.Dial("", "") b.Run("multiconn", func(b *testing.B) {
buf := []byte{1} writeBuf := make([]byte, bufSize)
clientConn.Write(buf) b.SetBytes(bufSize)
serverConn, _ := proxyFromCkServerL.Accept() b.ResetTimer()
serverConn.Read(buf) b.RunParallel(func(pb *testing.PB) {
b.ResetTimer() ready := make(chan int, 10)
for i := 0; i < b.N; i++ { go func() {
clientConn.Write(buf) serverConn, _ := pxyServerL.Accept()
serverConn.Read(buf) for {
} serverConn.Write(writeBuf)
}) <-ready
}
}()
readBuf := make([]byte, bufSize)
clientConn, _ := pxyClientD.Dial("", "")
clientConn.Write([]byte{1}) // to make server accept
for pb.Next() {
io.ReadFull(clientConn,readBuf)
ready <- 0
}
})
})
*/
}) })
} }

View File

@ -1,12 +1,15 @@
#!/usr/bin/env bash go get github.com/mitchellh/gox
set -eu
go install github.com/mitchellh/gox@latest
mkdir -p release mkdir -p release
rm -f ./release/* read -rp "Cleaning $PWD/release directory. Proceed? [y/n]" res
if [ ! "$res" == "y" ]; then
echo "Abort"
exit 1
fi
rm -rf ./release/*
if [ -z "$v" ]; then if [ -z "$v" ]; then
echo "Version number cannot be null. Run with v=[version] release.sh" echo "Version number cannot be null. Run with v=[version] release.sh"
@ -14,24 +17,20 @@ if [ -z "$v" ]; then
fi fi
output="{{.Dir}}-{{.OS}}-{{.Arch}}-$v" output="{{.Dir}}-{{.OS}}-{{.Arch}}-$v"
osarch="!darwin/arm !darwin/386" osarch="!darwin/arm !darwin/arm64 !darwin/386"
echo "Compiling:" echo "Compiling:"
os="windows linux darwin" os="windows linux darwin"
arch="amd64 386 arm arm64 mips mips64 mipsle mips64le" arch="amd64 386 arm arm64 mips mips64 mipsle mips64le"
pushd cmd/ck-client pushd cmd/ck-client || exit 1
CGO_ENABLED=0 gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output" gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output"
CGO_ENABLED=0 GOOS="linux" GOARCH="mips" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mips_softfloat-"${v}" GOOS="linux" GOARCH="mips" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mips_softfloat-"${v}"
CGO_ENABLED=0 GOOS="linux" GOARCH="mipsle" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mipsle_softfloat-"${v}" GOOS="linux" GOARCH="mipsle" GOMIPS="softfloat" go build -ldflags "-X main.version=${v}" -o ck-client-linux-mipsle_softfloat-"${v}"
mv ck-client-* ../../release mv ck-client-* ../../release
popd
os="linux" os="linux"
arch="amd64 386 arm arm64" arch="amd64 386 arm arm64"
pushd cmd/ck-server pushd ../ck-server || exit 1
CGO_ENABLED=0 gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output" gox -ldflags "-X main.version=${v}" -os="$os" -arch="$arch" -osarch="$osarch" -output="$output"
mv ck-server-* ../../release mv ck-server-* ../../release
popd
sha256sum release/*

View File

@ -1,13 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended"
],
"packageRules": [
{
"packagePatterns": ["*"],
"excludePackagePatterns": ["utls"],
"enabled": false
}
]
}