1
0
mirror of https://github.com/strongdm/comply synced 2025-12-15 10:43:47 +00:00

Compare commits

...

137 Commits

Author SHA1 Message Date
Justin McCarthy
f5f7c08b73 increment minor for release (via Makefile) 2020-09-14 12:01:37 -07:00
Justin McCarthy
b2276f9e54 increment patch for release (via Makefile) 2020-09-14 11:54:47 -07:00
Justin McCarthy
f1b5bbeff9 manual version bump 2020-09-14 11:54:24 -07:00
Justin McCarthy
b7acb1eecf increment patch for release (via Makefile) 2020-09-14 11:43:46 -07:00
Justin McCarthy
da8a574e86 increment patch for release (via Makefile) 2020-09-14 11:41:38 -07:00
Justin McCarthy
7468711b3b go.mod: update gitlab dep to v0.30.1
Releases after github.com/xanzy/go-gitlab@v0.31.0 introduce breaking
changes to the NewClient call.

Addresses #94
2020-09-14 11:31:56 -07:00
Michael E. Gruen
8baf787ed7 Bug fixes (gitlab.go): pagination, labels (#84) 2020-09-14 11:11:09 -07:00
Justin McCarthy
4c5c18964b go.mod: update dependencies
Addresses #94
2020-09-14 11:08:56 -07:00
U Cirello
c5a1bd804b Merge pull request #65 from ucirello/master
chore: migrate to go modules
2019-08-13 00:42:53 +02:00
Carlos C
c303b68201 chore: migrate to go modules 2019-07-14 13:51:10 -03:00
Paddy Byers
87e8266f1b Allow github config to be passed in env (#62) 2019-06-18 12:28:16 -04:00
Justin McCarthy
e8d6d536a8 doc enhancements 2019-03-16 00:28:08 -07:00
Justin McCarthy
ef7a4092a7 increment patch for release (via Makefile) 2019-03-16 00:21:09 -07:00
Justin McCarthy
9bcc88eebc all interfaces 2019-03-16 00:21:01 -07:00
Justin McCarthy
edd3c7dd44 prep port exposure from docker 2019-03-16 00:20:46 -07:00
Justin McCarthy
25058359ea increment minor for release (via Makefile) 2019-03-16 00:05:08 -07:00
Justin McCarthy
c4a0b7afff do not refresh assets during release 2019-03-16 00:04:55 -07:00
Justin McCarthy
ef32942dad Serve now provides an HTTP server, listening on port 4000 by default. Listen port can be set with the --port flag to the serve command. Websocket refresh remains intact. 2019-03-16 00:02:50 -07:00
Justin McCarthy
0c9dbd736b slightly better description 2019-02-21 00:54:22 -08:00
Justin McCarthy
b98d179847 Dockerfile example 2019-02-20 22:51:41 -08:00
Justin McCarthy
435ac086c0 increment patch for release (via Makefile) 2018-12-17 14:52:27 -08:00
Justin McCarthy
0ddbb6cf52 automated asset refresh (via Makefile) 2018-12-17 14:52:19 -08:00
Justin McCarthy
3ebccc2811 Revert "automated asset refresh (via Makefile)"
This reverts commit 24ff9dd762.
2018-12-17 14:51:46 -08:00
Justin McCarthy
b39bec8108 do not auto-clobber from example 2018-12-17 14:51:07 -08:00
Justin McCarthy
c699c64627 increment patch for release (via Makefile) 2018-12-17 14:43:58 -08:00
Justin McCarthy
24ff9dd762 automated asset refresh (via Makefile) 2018-12-17 14:43:50 -08:00
Justin McCarthy
5c160d1ec5 increment patch for release (via Makefile) 2018-12-17 14:43:31 -08:00
Justin McCarthy
f3088bfe28 update authors 2018-12-17 14:43:00 -08:00
Kevin N. Murphy
aa57be25c9 fix markdown whitespace on heading (#59) 2018-12-17 14:41:37 -08:00
arambhashura
2cef618abb Minor correction. (#58)
The word "is" was missing in the first sentence of Purpose and Scope.
2018-12-17 14:41:27 -08:00
Justin McCarthy
e8a0ecd076 Jira fields were swapped 2018-12-17 14:41:09 -08:00
Andy Magnusson
1a3e2a2bf7 Merge branch 'master' of https://github.com/strongdm/comply 2018-12-06 10:39:15 -05:00
Andy Magnusson
7225741a46 Adding new templates to proper soc2 narratives location 2018-12-06 10:10:18 -05:00
Justin McCarthy
7105d2d4ef increment patch for release (via Makefile) 2018-11-08 22:10:44 -08:00
Justin McCarthy
e3fb66b0e4 automated asset refresh (via Makefile) 2018-11-08 22:10:37 -08:00
Justin McCarthy
1028521ea7 increment patch for release (via Makefile) 2018-11-08 21:57:00 -08:00
Justin McCarthy
b2c2bf3558 update authors 2018-11-08 21:56:23 -08:00
Matt Simerson
d44af2e1cd gitlab: search for the label name that issues have (#57)
* gitlab: search for the label name that issues have

comply creates tags with "comply-" prefix, so search for that

* fix issue counters by using correct label name
2018-11-08 21:53:17 -08:00
Andy Magnusson
e2281b5fe3 Backporting new narrative content to Comply 2018-10-04 10:23:24 -04:00
Justin McCarthy
d43aca58ba increment patch for release (via Makefile) 2018-08-29 15:50:07 -07:00
Justin McCarthy
9e4cd2cf1a automated asset refresh (via Makefile) 2018-08-29 15:49:56 -07:00
Justin McCarthy
3711e0054d fmt 2018-08-29 15:48:07 -07:00
Justin McCarthy
2e9f6cf270 getGitApprovalInfo early exit if approvedBranch unspecified.
go fmt.
2018-08-29 15:47:02 -07:00
Justin McCarthy
c5868fa544 update authors 2018-08-29 15:45:27 -07:00
Alan Cox
274986ad9c Optionally, authorship and approval information appended to policies (#54) 2018-08-29 15:39:50 -07:00
Alan Cox
bcc9b06ac4 Specifying Jira Issuetype in config.yaml (#53)
* Jira integration documentation improved. Added ability to specify what type of issue to create in Jira

* Apparently, Go doesn't like http/https in front of package name in
2018-08-29 15:17:00 -07:00
Justin McCarthy
8f3d668789 incorrect ticket label 2018-08-17 13:32:35 -07:00
Andy Magnusson
358ac431a6 Retitled CC2.1, CC2.2, CC2.3 2018-08-16 13:40:29 -04:00
Justin McCarthy
365e98222b increment patch for release (via Makefile) 2018-07-23 11:57:14 -07:00
Justin McCarthy
84589a83f4 automated asset refresh (via Makefile) 2018-07-23 11:57:06 -07:00
Justin McCarthy
b329107079 reverting cd89840164 2018-07-23 11:56:44 -07:00
Justin McCarthy
3f5d9b4409 increment minor for release (via Makefile) 2018-07-20 17:55:58 -07:00
Justin McCarthy
3df822206e automated asset refresh (via Makefile) 2018-07-20 17:55:51 -07:00
Justin McCarthy
007cf3dd3c dep ensure 2018-07-20 17:55:27 -07:00
Justin McCarthy
97989c5cf6 Merge branch 'master' of github.com:strongdm/comply 2018-07-20 17:45:33 -07:00
Mason Hensley
ce5c4c3a4a Add gitlab issue integration (#51)
* Add gitlab issue integration

* Clean up gitlab issue integration
2018-07-20 17:45:20 -07:00
Justin Bodeutsch
cd89840164 Fixed date on generated PDFs (#52)
* Percent signs need to be escaped in Printf

* Update usage of ModifiedAt to fix date stamp on generated PDFs
2018-07-20 17:45:07 -07:00
Justin McCarthy
e60d7285f4 more precise cross-platform dependency checking 2018-07-20 17:44:28 -07:00
Justin McCarthy
c99d800397 use filepath join to open index html 2018-07-20 17:08:09 -07:00
Justin McCarthy
0f1badca5b prepare Makefile for introduction of Windows binary 2018-07-05 17:26:29 -07:00
Justin McCarthy
00b59ed620 update to reflect contributions 2018-07-02 17:12:00 -07:00
Justin Bodeutsch
749017761d Percent signs need to be escaped in Printf (#49) 2018-07-02 17:10:00 -07:00
Justin McCarthy
f502225cde increment patch for release (via Makefile) 2018-06-25 17:50:51 -07:00
Justin McCarthy
6cf6f70296 automated asset refresh (via Makefile) 2018-06-25 17:50:42 -07:00
Justin McCarthy
3494bdce7b prep version notification and release channel structure 2018-06-25 17:50:07 -07:00
Justin McCarthy
924dd25744 increment patch for release (via Makefile) 2018-06-15 16:51:26 -07:00
Justin McCarthy
02d3b75731 automated asset refresh (via Makefile) 2018-06-15 16:51:17 -07:00
Justin McCarthy
4a314c62d1 pandoc 2 args; validate presence of PDF after calling docker pandoc. 2018-06-15 16:00:37 -07:00
Anthony Oliver
f2ef58e7bd Added package for missing fonts, otherwise updated pandoc 2 with pandoc -f markdown+smart doesn't work and throws a mktextfm ecrm1000 error (#46) 2018-06-15 11:49:45 -07:00
Justin McCarthy
fc1a1d9abc brew goal should not compile assets, which depends on go get 2018-06-09 22:27:49 -07:00
Justin McCarthy
65dddc4332 increment patch for release (via Makefile) 2018-06-07 22:24:17 -07:00
Justin McCarthy
eecfe49fbd automated asset refresh (via Makefile) 2018-06-07 22:24:09 -07:00
Justin McCarthy
44931ca808 fixes #45 2018-06-07 22:23:51 -07:00
Justin McCarthy
06b8a2fe44 increment patch for release (via Makefile) 2018-06-04 16:16:45 -07:00
Justin McCarthy
2d088cdf45 automated asset refresh (via Makefile) 2018-06-04 16:16:37 -07:00
Justin McCarthy
fb60f405ba Feature/jira (#43)
* automated asset refresh (via Makefile)

* increment patch for release (via Makefile)
2018-06-04 16:14:06 -07:00
Justin McCarthy
3c696e6d01 Merge pull request #41 from strongdm/feature/jira
Initial Jira implementation
2018-06-01 17:37:41 -07:00
Justin McCarthy
4d63cf559b dep ensure 2018-06-01 17:37:01 -07:00
Justin McCarthy
0ff74208cc link format 2018-06-01 17:36:41 -07:00
Justin McCarthy
75a80189ce correct tag name 2018-06-01 17:27:56 -07:00
Justin McCarthy
f6c9f89792 use Resolution field rather than Status 2018-06-01 17:18:41 -07:00
Justin McCarthy
25f7156ac2 enable Jira 2018-06-01 17:07:36 -07:00
Justin McCarthy
2d5e6b48cb config override should cause image to be pulled 2018-06-01 17:04:52 -07:00
Justin McCarthy
4d830789ec never pull the docker container if pandoc is present and working in the PATH 2018-06-01 17:01:22 -07:00
Justin McCarthy
4969d179ec find by tag / label 2018-06-01 16:57:06 -07:00
Justin McCarthy
10dc0b70e0 partial jira implementation; TODO: all Find/Read and Link cases. 2018-05-30 16:28:31 -07:00
Justin McCarthy
0f68acae10 increment patch for release (via Makefile) 2018-05-29 16:35:06 -07:00
Justin McCarthy
19e100801a automated asset refresh (via Makefile) 2018-05-29 16:34:57 -07:00
Justin McCarthy
46aaf1c663 document model 2018-05-29 14:42:37 -07:00
Justin McCarthy
815e7e5f61 the data model for Narratives and Policies have converged, allowing both to be represented by a common Document struct.
2018-05-28 17:05:56 -05:00
Justin McCarthy
ff626a5ee2 pandoc must also include pdflatex 2018-05-28 16:46:35 -05:00
Justin McCarthy
1ec70a67d1 increment minor for release (via Makefile) 2018-05-23 17:16:55 -07:00
Justin McCarthy
096ad03ee1 automated asset refresh (via Makefile) 2018-05-23 17:16:47 -07:00
Justin McCarthy
5d67d60fd4 increment patch for release (via Makefile) 2018-05-23 17:04:43 -07:00
Justin McCarthy
8e3ebdc94a automated asset refresh (via Makefile) 2018-05-23 17:04:35 -07:00
Justin McCarthy
39fd371c4e spelled .envrc wrong 2018-05-23 17:04:03 -07:00
Justin McCarthy
1e5383eb01 env file to ignore 2018-05-23 16:53:07 -07:00
Justin McCarthy
49e950c3c0 If pandoc appears in the path, it will be preferred over Docker.
The pandoc version must be 2.2.1 or greater.

Defaults can be overridden by an optional "pandoc: pandoc"
or "pandoc: docker" in the comply.yml.
2018-05-23 16:48:35 -07:00
Justin McCarthy
ff350a2b89 Prepare pandoc upgrade (not pushed to docker hub yet) 2018-05-23 16:46:28 -07:00
Justin McCarthy
82baa57684 note pandoc dependency 2018-05-23 14:18:28 -07:00
Justin McCarthy
bb4200ff43 replace dockerMustExist with pandocMustExist dependency on build and serve commands 2018-05-23 14:15:39 -07:00
Justin McCarthy
1b807da10e Update AUTHORS.txt 2018-05-20 22:57:37 -07:00
Justin McCarthy
57a617abd5 increment patch for release (via Makefile) 2018-05-18 17:33:15 -07:00
Justin McCarthy
1170ad6a92 automated asset refresh (via Makefile) 2018-05-18 17:33:06 -07:00
Justin McCarthy
b81d8388ef link to demo video 2018-05-18 17:32:54 -07:00
Justin McCarthy
36331849c8 point Linux users to releases 2018-05-18 17:29:29 -07:00
Justin McCarthy
1d3dcc8f54 logo (small) 2018-05-18 17:27:28 -07:00
Manisha Singh
9309194a40 Merge branch 'master' of github.com:strongdm/comply 2018-05-18 17:25:14 -07:00
Manisha Singh
a37e8dc233 Initial commit of Access 2018-05-18 17:25:11 -07:00
Justin McCarthy
bef531973f increment patch for release (via Makefile) 2018-05-18 16:48:36 -07:00
Justin McCarthy
a025ea5e39 automated asset refresh (via Makefile) 2018-05-18 16:48:28 -07:00
Justin McCarthy
cee7553319 invoke update in tap dir 2018-05-18 16:48:25 -07:00
Justin McCarthy
4c55c371af increment patch for release (via Makefile) 2018-05-18 16:44:29 -07:00
Justin McCarthy
af4fb6e0d2 automated asset refresh (via Makefile) 2018-05-18 16:44:21 -07:00
Justin McCarthy
0e1eed80c9 capture sha 2018-05-18 16:44:16 -07:00
Justin McCarthy
deeb8c1695 capture sha 2018-05-18 16:44:16 -07:00
Manisha Singh
2a4486315e Merge branch 'master' of github.com:strongdm/comply 2018-05-18 16:41:22 -07:00
Manisha Singh
df159a5f0d Initial commit of Risk 2018-05-18 16:41:18 -07:00
Justin McCarthy
f8a742556d increment patch for release (via Makefile) 2018-05-18 16:31:38 -07:00
Justin McCarthy
eb00183724 automated asset refresh (via Makefile) 2018-05-18 16:31:30 -07:00
Manisha Singh
5acf683e04 Initial commit of Availability 2018-05-18 16:09:48 -07:00
Manisha Singh
491bd00b20 Updated Table 3 2018-05-18 16:02:38 -07:00
Manisha Singh
a642c812e3 Initial commit of Encryption 2018-05-18 15:42:35 -07:00
Manisha Singh
69d036b00b Merge branch 'master' of github.com:strongdm/comply 2018-05-18 14:48:11 -07:00
Manisha Singh
736dfc539c Initial commit of Data Classification 2018-05-18 14:48:08 -07:00
Justin McCarthy
f5b28a1bac introduce procedure command 2018-05-18 14:42:55 -07:00
Justin McCarthy
80c8978034 increment patch for release (via Makefile) 2018-05-18 12:35:15 -07:00
Justin McCarthy
592852ad38 automated asset refresh (via Makefile) 2018-05-18 12:35:06 -07:00
Manisha Singh
179477b4ef Merge branch 'master' of github.com:strongdm/comply 2018-05-18 12:33:51 -07:00
Manisha Singh
46fd0e6987 Initial commit of Vendor 2018-05-18 12:33:48 -07:00
Justin McCarthy
9a357f7bd6 workstation procedure example 2018-05-18 12:33:08 -07:00
Justin McCarthy
378a27542f Merge branch 'master' of github.com:strongdm/comply 2018-05-18 12:21:00 -07:00
Justin McCarthy
1b010e0d04 Procedure examples 2018-05-18 12:20:53 -07:00
Manisha Singh
1eebcaeee1 Initial commit of Development 2018-05-18 12:11:48 -07:00
Manisha Singh
6973675228 Initial commit of Removable Media 2018-05-18 11:56:09 -07:00
Manisha Singh
5e82aa0cfe Merge branch 'master' of github.com:strongdm/comply 2018-05-18 11:47:03 -07:00
Manisha Singh
ed8d8a9404 Initial commit of Remote Access 2018-05-18 11:46:58 -07:00
1156 changed files with 190272 additions and 84833 deletions

2
.gitignore vendored
View File

@@ -1,3 +1,5 @@
comply
output
dist
.envrc
bindata.go

View File

@@ -1,3 +1,12 @@
# Authors in alphabetical order:
arambhashura
Alan Cox
Andy Magnusson
Anthony Oliver
Justin Bodeutsch
Justin McCarthy <justin@strongdm.com>
Kevin N. Murphy
Manisha Singh
Mason Hensley
Matt Simerson

21
Dockerfile Normal file
View File

@@ -0,0 +1,21 @@
FROM strongdm/pandoc:latest
# based on implementation by James Gregory <james@jagregory.com>
MAINTAINER Comply <comply@strongdm.com>
RUN apt-get update -y \
&& apt-get install -y curl
ARG COMPLY_VERSION
ENV COMPLY_VERSION ${COMPLY_VERSION:-1.4.0}
EXPOSE 4000/tcp
# install comply binary
RUN curl -J -L -o /tmp/comply.tgz https://github.com/strongdm/comply/releases/download/v${COMPLY_VERSION}/comply-v${COMPLY_VERSION}-linux-amd64.tgz \
&& tar -xzf /tmp/comply.tgz \
&& mv ./comply-v${COMPLY_VERSION}-linux-amd64 /usr/local/bin/comply
WORKDIR /source
ENTRYPOINT ["/bin/bash"]

262
Gopkg.lock generated
View File

@@ -1,262 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/Microsoft/go-winio"
packages = ["."]
revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f"
version = "v0.4.7"
[[projects]]
branch = "master"
name = "github.com/chzyer/readline"
packages = ["."]
revision = "f6d7a1f6fbf35bbf9beb80dc63c56a29dcfb759f"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/docker/distribution"
packages = [
"digest",
"reference"
]
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
version = "v2.6.2"
[[projects]]
name = "github.com/docker/docker"
packages = [
"api/types",
"api/types/blkiodev",
"api/types/container",
"api/types/events",
"api/types/filters",
"api/types/mount",
"api/types/network",
"api/types/reference",
"api/types/registry",
"api/types/strslice",
"api/types/swarm",
"api/types/time",
"api/types/versions",
"api/types/volume",
"client",
"pkg/tlsconfig"
]
revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
version = "v1.13.1"
[[projects]]
name = "github.com/docker/go-connections"
packages = [
"nat",
"sockets",
"tlsconfig"
]
revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
version = "v0.3.0"
[[projects]]
name = "github.com/docker/go-units"
packages = ["."]
revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52"
version = "v0.3.2"
[[projects]]
name = "github.com/elazarl/go-bindata-assetfs"
packages = ["."]
revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43"
version = "v1.0.0"
[[projects]]
name = "github.com/fatih/color"
packages = ["."]
revision = "507f6050b8568533fb3f5504de8e5205fa62a114"
version = "v1.6.0"
[[projects]]
name = "github.com/fsnotify/fsnotify"
packages = ["."]
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
name = "github.com/gohugoio/hugo"
packages = ["watcher"]
revision = "f414966b942b5aad75565bee6c644782a07f0658"
version = "v0.37.1"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
[[projects]]
name = "github.com/google/go-github"
packages = ["github"]
revision = "e48060a28fac52d0f1cb758bc8b87c07bac4a87d"
version = "v15.0.0"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
name = "github.com/gorilla/websocket"
packages = ["."]
revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/jcelliott/lumber"
packages = ["."]
revision = "dd349441af25132d146d7095c6693a15431fc9b1"
[[projects]]
branch = "master"
name = "github.com/juju/ansiterm"
packages = [
".",
"tabwriter"
]
revision = "720a0952cc2ac777afc295d9861263e2a4cf96a1"
[[projects]]
branch = "master"
name = "github.com/lunixbochs/vtclean"
packages = ["."]
revision = "d14193dfc626125c831501c1c42340b4248e1f5a"
[[projects]]
branch = "master"
name = "github.com/manifoldco/promptui"
packages = [
".",
"list",
"screenbuf"
]
revision = "c0c0d3afc6a03bcb5c1df10b70b862a650db9f9b"
[[projects]]
name = "github.com/mattn/go-colorable"
packages = ["."]
revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
version = "v0.0.9"
[[projects]]
name = "github.com/mattn/go-isatty"
packages = ["."]
revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
version = "v0.0.3"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/nanobox-io/golang-scribble"
packages = ["."]
revision = "ced58d671850da57ce8c11315424513b608083d7"
[[projects]]
branch = "master"
name = "github.com/olekukonko/tablewriter"
packages = ["."]
revision = "b8a9be070da40449e501c3c4730a889e42d87a9e"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/robfig/cron"
packages = ["."]
revision = "b024fc5ea0e34bc3f83d9941c8d60b0622bfaca4"
version = "v1"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
name = "github.com/urfave/cli"
packages = ["."]
revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1"
version = "v1.20.0"
[[projects]]
name = "github.com/yosssi/ace"
packages = ["."]
revision = "ea038f4770b6746c3f8f84f14fa60d9fe1205b56"
version = "v0.0.5"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"proxy"
]
revision = "d0aafc73d5cdc42264b0af071c261abac580695e"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"internal"
]
revision = "7af32f14d0a25aec7873e0683e8e48dcead159a8"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "dd2ff4accc098aceecb86b36eaa7829b2a17b1c9"
[[projects]]
name = "google.golang.org/appengine"
packages = [
"internal",
"internal/base",
"internal/datastore",
"internal/log",
"internal/remote_api",
"internal/urlfetch",
"urlfetch"
]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
version = "v2.1.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "4fd2ff9f9869c3f3e30601504f4b00fce69d282ae8df42583a1c60848bfd0766"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -1,30 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[prune]
go-tests = true
unused-packages = true

View File

@@ -3,10 +3,9 @@ GO_SOURCES := $(shell find . -name '*.go')
THEME_SOURCES := $(shell find themes)
assets: $(THEME_SOURCES)
@go get github.com/jteeuwen/go-bindata/...
@go get github.com/elazarl/go-bindata-assetfs/...
@go install github.com/elazarl/go-bindata-assetfs
go-bindata-assetfs -o bindata.go -pkg theme -prefix themes themes/...
go install -mod=vendor github.com/containous/go-bindata/go-bindata
go install -mod=vendor github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
go-bindata-assetfs -pkg theme -prefix themes themes/...
mv bindata.go internal/theme/themes_bindata.go
comply: assets $(GO_SOURCES)
@@ -19,12 +18,14 @@ dist: clean
$(eval LDFLAGS := -ldflags='-X "github.com/strongdm/comply/internal/cli.Version=$(VERSION)"')
mkdir dist
echo $(VERSION)
GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -gcflags=-trimpath=$(GOPATH) -asmflags=-trimpath=$(GOPATH) $(LDFLAGS) -o dist/comply-$(VERSION)-darwin-amd64 .
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -gcflags=-trimpath=$(GOPATH) -asmflags=-trimpath=$(GOPATH) $(LDFLAGS) -o dist/comply-$(VERSION)-linux-amd64 .
GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -gcflags=-trimpath=$(GOPATH) -asmflags=-trimpath=$(GOPATH) -ldflags '-extldflags "-static"' $(LDFLAGS) -o dist/comply-$(VERSION)-darwin-amd64 .
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -gcflags=-trimpath=$(GOPATH) -asmflags=-trimpath=$(GOPATH) -ldflags '-extldflags "-static"' $(LDFLAGS) -o dist/comply-$(VERSION)-linux-amd64 .
GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -gcflags=-trimpath=$(GOPATH) -asmflags=-trimpath=$(GOPATH) -ldflags '-extldflags "-static"' $(LDFLAGS) -o dist/comply-$(VERSION)-windows-amd64.exe .
cd dist && tar -czvf comply-$(VERSION)-darwin-amd64.tgz comply-$(VERSION)-darwin-amd64
cd dist && tar -czvf comply-$(VERSION)-linux-amd64.tgz comply-$(VERSION)-linux-amd64
cd dist && zip comply-$(VERSION)-windows-amd64.zip comply-$(VERSION)-windows-amd64.exe
brew: clean assets $(GO_SOURCES)
brew: clean $(GO_SOURCES)
$(eval VERSION := $(shell cat version))
$(eval LDFLAGS := -ldflags='-X "github.com/strongdm/comply/internal/cli.Version=$(VERSION)"')
mkdir bin
@@ -38,7 +39,7 @@ clean:
install: assets $(GO_SOURCES)
go install github.com/strongdm/comply
push-assets: is-clean export-example assets
push-assets: is-clean assets
git commit -am "automated asset refresh (via Makefile)"
git push
@@ -50,19 +51,8 @@ else
@exit 1
endif
export-example:
cp example/narratives/* themes/comply-soc2/narratives
cp example/procedures/* themes/comply-soc2/procedures
cp example/policies/* themes/comply-soc2/policies
cp example/standards/* themes/comply-soc2/standards
cp example/templates/* themes/comply-soc2/templates
cp example/templates/* themes/comply-blank/templates
cp example/*.md themes/comply-soc2
cp example/*.md themes/comply-blank
docker:
cd build && docker build -t strongdm/pandoc .
docker tag jagregory/pandoc:latest strongdm/pandoc:latest
docker push strongdm/pandoc
cleanse:
@@ -78,6 +68,9 @@ release-env:
ifndef GH_LOGIN
$(error GH_LOGIN must be set to a valid GitHub token)
endif
ifndef COMPLY_TAPDIR
$(error COMPLY_TAPDIR must be set to the path of the comply homebrew tap repo)
endif
release: release-env dist release-deps
$(eval VERSION := $(shell git describe --tags --always --dirty="-dev"))
@@ -105,17 +98,22 @@ release: release-env dist release-deps
--file dist/comply-$(VERSION)-linux-amd64.tgz
@echo "Update homebrew formula with the following: "
@echo "version $(VERSION)"
@curl -L https://github.com/strongdm/comply/archive/$(VERSION).tar.gz |shasum -a 256
$(eval SHA := $(shell curl -s -L https://github.com/strongdm/comply/archive/$(VERSION).tar.gz |shasum -a 256|cut -d" " -f1))
@echo "version $(VERSION) sha $(SHA)"
cd $$COMPLY_TAPDIR && ./update.sh $(VERSION) $(SHA)
patch-release: release-env push-assets patch release
patch-release: release-env patch release
$(eval VERSION := $(shell git describe --tags --always --dirty="-dev"))
curl -X POST --data-urlencode 'payload={"channel": "#release", "username": "release", "text": "comply $(VERSION) released", "icon_emoji": ":shipit:"}' https://hooks.slack.com/services/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
minor-release: release-env push-assets minor release
minor-release: release-env minor release
$(eval VERSION := $(shell git describe --tags --always --dirty="-dev"))
curl -X POST --data-urlencode 'payload={"channel": "#release", "username": "release", "text": "comply $(VERSION) released", "icon_emoji": ":shipit:"}' https://hooks.slack.com/services/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
docker-release:
docker build --build-arg COMPLY_VERSION=`cat VERSION` -t strongdm/comply .
docker push strongdm/comply
patch: clean gitsem
gitsem -m "increment patch for release (via Makefile)" patch
git push
@@ -129,7 +127,7 @@ minor: clean gitsem
release-deps: gitsem gh-release
gitsem:
go get -u github.com/Clever/gitsem
go install github.com/Clever/gitsem
gh-release:
go get -u github.com/aktau/github-release
go install github.com/aktau/github-release

View File

@@ -1,4 +1,4 @@
# Comply
![Comply](https://github.com/strongdm/comply/blob/master/logo.png)
Comply is a SOC2-focused compliance automation tool:
@@ -12,6 +12,10 @@ macOS:
`brew tap strongdm/comply; brew install comply`
Linux:
[Download latest release](https://github.com/strongdm/comply/releases)
Go users:
`go get github.com/strongdm/comply`
@@ -34,6 +38,8 @@ Join us in [Comply Users](https://join.slack.com/t/comply-users/shared_invite/en
# Screenshots
[Demo video](https://vimeo.com/270257486)
## Start a Project
![screencast 1](sc-1.gif)
@@ -48,6 +54,10 @@ Join us in [Comply Users](https://join.slack.com/t/comply-users/shared_invite/en
## Dashboard
![screencast 2](sc-3.gif)
## Dependencies
Comply relies on [pandoc](https://pandoc.org/), which can be installed directly as an OS package or invoked via Docker.
## CLI
```
@@ -58,12 +68,72 @@ USAGE:
comply [global options] command [command options] [arguments...]
COMMANDS:
init initialize a new compliance repository (interactive)
build, b generate a static website summarizing the compliance program
scheduler create tickets based on procedure schedule
serve live updating version of the build command
sync sync ticket status to local cache
todo list declared vs satisfied compliance controls
help, h Shows a list of commands or help for one command
init initialize a new compliance repository (interactive)
build, b generate a static website summarizing the compliance program
procedure, proc create ticket by procedure ID
scheduler create tickets based on procedure schedule
serve live updating version of the build command
sync sync ticket status to local cache
todo list declared vs satisfied compliance controls
help, h Shows a list of commands or help for one command
```
## Running in Docker
Comply is currently only released for Linux and macOS, however from other operating systems it's possible to run using Docker:
```
# first pull the latest published docker image
$ docker pull strongdm/comply
# from an empty directory that will contain your comply project
$ docker run --rm -v "$PWD":/source -p 4000:4000 -it strongdm/comply
root@ec4544732298:/source# comply init
✗ Organization Name:
# serve content live from an established project
$ docker run --rm -v "$PWD":/source -p 4000:4000 -it strongdm/comply
root@ae4d499583fc:/source# comply serve
Serving content of output/ at http://127.0.0.1:4000 (ctrl-c to quit)
```
For Windows users, replace $PWD with the full path to your project directory
## Ticketing Integrations:
- Jira
- Github
- Gitlab
## Configuring Jira
When comply creates a ticket (through `proc`, for instance), it sets the following fields.
- assignee
- description
- issuetype
- labels
- project key
- reporter
- summary
Please make sure that the default *Create Screen* has all of those fields enabled. Additionally, make sure that there are no other required fields for the issue type you choose.
## Forking and local development
> Assumes installation of golang and configuration of GOPATH in .bash_profile, .zshrc, etc
> Inspiration: http://code.openark.org/blog/development/forking-golang-repositories-on-github-and-managing-the-import-path
```
$ go get github.com/strongdm/comply
$ cd $GOPATH/src/github.com/strongdm/comply ; go get ./...
$ make
$ cd example
$ mv comply.yml.example comply.yml
$ ../comply -h
$ ../comply sync
$ ../comply serve
#
$ make # recompile as needed with in $GOPATH/src/github.com/strongdm/comply
```

View File

@@ -1 +1 @@
1.1.21
1.5.0

View File

@@ -1,3 +1,29 @@
FROM scratch
FROM haskell:latest
MAINTAINER strongDM Comply <comply@strongdm.com>
# based on implementation by James Gregory <james@jagregory.com>
MAINTAINER Comply <comply@strongdm.com>
# install latex packages
RUN apt-get update -y \
&& apt-get install -y -o Acquire::Retries=10 --no-install-recommends \
texlive-latex-base \
texlive-xetex \
texlive-fonts-recommended \
latex-xcolor \
texlive-latex-extra \
fontconfig \
unzip \
lmodern
# will ease up the update process
# updating this env variable will trigger the automatic build of the Docker image
ENV PANDOC_VERSION "2.2.1"
# install pandoc
RUN cabal update && cabal install pandoc-${PANDOC_VERSION}
WORKDIR /source
ENTRYPOINT ["/root/.cabal/bin/pandoc"]
CMD ["--help"]

View File

@@ -1,7 +1,31 @@
name: "Acme"
filePrefix: "Acme"
# The following setting is optional.
# If you set this (to, e.g. master), and you build the policies
# on that branch, then a section is appended to each policy that
# describes the approval. Text will look like:
#
# Last edit made by John Doe (jdoe@email.com) on Wed, 15 Aug 2018 12:45:28 -0400.
# Approved by Joan Smith (jsmith@email.com) on Wed, 15 Aug 2018 16:54:48 -0400 in commit abc123123.
#
# The change author gets credit for the edit.
# The person who committed or merged to the approval branch gets credit for approval.
approvedBranch: master
tickets:
github:
token: XXX
username: strongdm
repo: comply
repo: comply
# jira:
# username: xxxx # This is the username you log in to Jira's UI with. Probably your email address.
# password: xxxx # If you don't have a "managed account", use your password in this field. But if your organization
# # uses SAML or OAuth, or Jira's built-in multi-factor authentication, you need to use
# # an API token. Learn more here: https://confluence.atlassian.com/cloud/api-tokens-938839638.html
# project: comply
# url: https://yourjira
# taskType: Task # This must be an Issue, not a sub-task
# gitlab:
# domain: https://gitlab.example.com:443/ # or https://gitlab.com/
# token: token-here
# repo: full-slug/of-project

View File

@@ -17,6 +17,78 @@ majorRevisions:
# Control Environment Narrative
Here we narrate why our control environment satisfies the control keys listed in the YML block
The following provides a description of the control structure of {{.Name}}.
# Template Coming Soon
The intent of this description is to enumerate the logical, policy, and procedural controls that serve to monitor {{.Name}}'s application and data security. Changes uncovered by these procedures in the logical, policy, procedural, or customer environment are addressed by remediations specific to the noted change.
# Logical Controls
{{.Name}} employs several logical controls to protect confidential data and ensure normal operation of its core product.
- Mandatory data encryption at rest and in motion
- Multi-factor authentication for access to cloud infrastructure
- Activity and anomaly monitoring on production systems
- Vulnerability management program
# Policy Controls
{{.Name}} employs several policy controls to protect confidential data and ensure normal operation of its core product. These policies include, but are not limited to:
- Access Control Policy
- Encryption Policy
- Office Security Policy
- Password Policy
- Policy Training Policy
- Vendor Policy
- Workstation Policy
# Procedural Controls
{{.Name}} has numerous scheduled procedures to monitor and tune the effectiveness of ongoing security controls, and a series of event-driven procedures to respond to security-related events.
TODO: Finalize these lists
## Scheduled Security and Audit Procedures
- Review Access [quarterly]
- Review Security Logs [weekly]
- Review Cyber Risk Assessment (enumerate possible compromise scenarios) [quarterly]
- Review Data Classification [quarterly]
- Backup Testing [quarterly]
- Disaster Recovery Testing [semi-annual]
- Review Devices & Workstations [quarterly]
- Review & Clear Low-Priority Alerts [weekly]
- Apply OS Patches [monthly]
- Verify Data Disposal per Retention Policy [quarterly]
- Conduct Security Training [annual]
- Review Security Monitoring and Alerting Configuration [quarterly]
- Penetration Test [annual]
- Whitebox Security Review [annual]
- SOC2 Audit [annual]
## Event-Driven Security and Audit Procedures
- Onboard Employee
- Offboard Employee
- Investigate Security Alert
- Investigate Security Incident
# Remediations
{{.Name}} uses the outcomes of the aforementioned controls and procedures to identify shortcomings in the existing control environment. Once identified, these shortcomings are remediated by improving existing controls and procedures, and creating new controls and procedures as needed.
# Communications
{{.Name}} communicates relevant information regarding the functioning of the above controls with internal and external parties on an as-needed basis and according to statutory requirements.
## Internal
{{.Name}} communicates control outcomes, anomalies, and remediations internally using the following channels:
- Slack
- Email
- Github ticketing
## External
{{.Name}} communicates relevant control-related information to external parties including shareholders, customers, contractors, regulators, and government entities as needed according to contractual and regulatory/statutory obligation.

View File

@@ -9,4 +9,39 @@ majorRevisions:
Here we describe the key products marketed by our organization
# Template Coming Soon
# Products
## Product 1
Overview of product 1
### Architecture
Brief architectural discussion of product 1
### Security Considerations
Specific security considerations for product 1. Refer to policies, procedures here.
# References
## Narratives
List relevant narratives, probably including
Organizational Narrative
Security Narrative
System Narrative
## Policies
List relevant policies, probably including
Application Security Policy
Datacenter Policy
Log Management Policy
Password Policy
Security Incident Response Policy
Risk Assessment Policy
## Procedures
List relevant procedures, probably including access review, patching, alert monitoring, log review, pen testing

View File

@@ -15,4 +15,99 @@ majorRevisions:
Here we narrate why our org satisfies the control keys listed in the YML block
# Template Coming Soon
# {{.Name}} Product Architecture
Describe product architecture here, emphasizing security implications
# {{.Name}} Infrastructure
## Product Infrastructure
Describe product infrastructure, emphasizing security measures
### Authorized Personnel
- **AWS root account** access is granted only to the CTO and CEO
- **AWS IAM** access is granted to a limited group of **Operators**
- **{{.Name}} SSH** access is granted to a limited group of **Operators**
- **{{.Name}} DB** access is granted to a limited group of **Data Operators**
## IT Infrastructure
{{.Name}} uses the following cloud services for its internal infrastructure:
- List cloud services
Access to these cloud services is limited according to the role of the {{.Name}} employee and is reviewed quarterly as well as via regular onboarding/offboarding tasks for new and departing employees.
# {{.Name}} Workstations
{{.Name}} workstations are hardened against logical and physical attack by the following measures:
- operating system must be within one generation of current
- full-disk encryption
- onboard antivirus/antimalware software
- OS and AV automatically updated
Workstation compliance with these measures is evaluated on a quarterly basis.
## Remote Access
Many {{.Name}} employees work remotely on a regular basis and connect to production and internal IT systems via the same methods as those employees connecting from the {{.Name}} physical office, i.e., direct encrypted access to cloud services. It is the employee's responsibility to ensure that only authorized personnel use {{.Name}} resources and access {{.Name}} systems.
# Access Review
Access to {{.Name}} infrastructure, both internal and product, is reviewed quarterly and inactive users are removed. Any anomalies are reported to the security team for further investigation. When employees start or depart, an onboarding/offboarding procedure is followed to provision or deprovision appropriate account access.
# Penetration Testing
{{.Name}} commissions an external penetration test on an annual basis. All findings are immediately reviewed and addressed to the satisfaction of the CTO/CEO.
# {{.Name}} Physical Security
{{.Name}} has one physical location, in San Francisco, CA. Key issuance is tracked by the Office Physical Security Policy Ledger. Office keys are additionally held by the lessor, property management, and custodial staff. These keys are not tracked by the Office Physical Security Policy Ledger. {{.Name}} managers regularly review physical access privileges.
{{.Name}} infrastructure is located within AWS. {{.Name}} does not have physical access to AWS infrastructure.
# Risk Assessment
{{.Name}} updates its Cyber Risk Assessment on an annual basis in order to keep pace with the evolving threat landscape. The following is an inventory of adversarial and non-adversarial threats assessed to be of importance to {{.Name}}.
## Adversarial Threats
The following represents the inventory of adversarial threats:
|Threat|Source|Vector|Target|Likelihood|Severity|
|----------------------------+--------------+------------+-----------------+----------+------|
| | | | | | |
## Non-Adversarial Threats
The following represents the inventory of non-adversarial threats:
|Threat|Vector|Target|Likelihood|Severity|
|----------------------------+--------------+-------------+----------+------|
| | | | | |
# References
## Narratives
Products and Services Narrative
System Architecture Narrative
## Policies
Encryption Policy
Log Management Policy
Office Security Policy
Remote Access Policy
Security Incident Response Policy
Workstation Policy
## Procedures
Apply OS Patches
Review & Clear Low-Priority Alerts
Review Access
Review Devices & Workstations

View File

@@ -9,5 +9,49 @@ majorRevisions:
- date: Jun 1 2018
comment: Initial document
---
# Purpose and Scope
a. The purpose of this policy is to define procedures to onboard and offboard users to technical infrastructure in a manner that minimizes the risk of information loss or exposure.
a. This policy applies to all technical infrastructure within the organization.
a. This policy applies to all full-time and part-time employees and contractors.
# Background
a. In order to minimize the risk of information loss or exposure (from both inside and outside the organization), the organization is reliant on the principle of least privilege. Account creation and permission levels are restricted to only the resources absolutely needed to perform each person's job duties. When a user's role within the organization changes, those accounts and permission levels are changed/revoked to fit the new role and disabled when the user leaves the organization altogether.
# Policy
a. *During onboarding:*
i. Hiring Manager informs HR upon hire of a new employee.
i. HR emails IT to inform them of a new hire and their role.
i. IT creates a checklist of accounts and permission levels needed for that role.
i. The owner of each resource reviews and approves account creation and the
associated permissions.
i. IT works with the owner of each resource to set up the user.
a. *During offboarding:*
i. Hiring Manager notifies HR when an employee has been terminated.
i. HR sends a weekly email report to IT summarizing list of users terminated and instructs IT to disable their access.
i. IT terminates access within five business days from receipt of notification.
a. *When an employee changes roles within the organization:*
i. Hiring Manager will inform HR of a change in role.
i. HR and IT will follow the same steps as outlined in the onboarding and offboarding procedures.
a. *Review of accounts and permissions:*
i. Each month, IT and HR will review accounts and permission levels for accuracy.
# Coming Soon

View File

@@ -9,4 +9,92 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. The purpose of this policy is to define requirements for proper controls to protect the availability of the organizations information systems.
a. This policy applies to all users of information systems within the organization. This typically includes employees and contractors, as well as any external parties that come into contact with systems and information controlled by the organization (hereinafter referred to as “users”). This policy must be made readily available to all users.
# Background
a. The intent of this policy is to minimize the amount of unexpected or unplanned downtime (also known as outages) of information systems under the organizations control. This policy prescribes specific measures for the organization that will increase system redundancy, introduce failover mechanisms, and implement monitoring such that outages are prevented as much as possible. Where they cannot be prevented, outages will be quickly detected and remediated.
a. Within this policy, availability is defined as a characteristic of information or information systems in which such information or systems can be accessed by authorized entities whenever needed.
# References
a. Risk Assessment Policy
# Policy
a. Information systems must be consistently available to conduct and support business operations.
a. Information systems must have a defined availability classification, with appropriate controls enabled and incorporated into development and production processes based on this classification.
a. System and network failures must be reported promptly to the organizations lead for Information Technology (IT) or designated IT operations manager.
a. Users must be notified of scheduled outages (e.g., system maintenance) that require periods of downtime. This notification must specify the date and time of the system maintenance, expected duration, and anticipated system or service resumption time.
a. Prior to production use, each new or significantly modified application must have a completed risk assessment that includes availability risks. Risk assessments must be completed in accordance with the Risk Assessment Policy (reference (a)).
a. Capacity management and load balancing techniques must be used, as deemed necessary, to help minimize the risk and impact of system failures.
a. Information systems must have an appropriate data backup plan that ensures:
i. All sensitive data can be restored within a reasonable time period.
i. Full backups of critical resources are performed on at least a weekly basis.
i. Incremental backups for critical resources are performed on at least a daily basis.
i. Backups and associated media are maintained for a minimum of thirty (30) days and retained for at least one (1) year, or in accordance with legal and regulatory requirements.
i. Backups are stored off-site with multiple points of redundancy and protected using encryption and key management.
i. Tests of backup data must be conducted once per quarter. Tests of configurations must be conducted twice per year.
a. Information systems must have an appropriate redundancy and failover plan that meets the following criteria:
i. Network infrastructure that supports critical resources must have system-level redundancy (including but not limited to a secondary power supply, backup disk-array, and secondary computing system). Critical core components (including but not limited to routers, switches, and other devices linked to Service Level Agreements (SLAs)) must have an actively maintained spare. SLAs must require parts replacement within twenty-four (24) hours.
i. Servers that support critical resources must have redundant power supplies and network interface cards. All servers must have an actively maintained spare. SLAs must require parts replacement within twenty-four (24) hours.
i. Servers classified as high availability must use disk mirroring.
a. Information systems must have an appropriate business continuity plan that meets the following criteria:
i. Recovery time and data loss limits are defined in Table 3.
i. Recovery time requirements and data loss limits must be adhered to with specific documentation in the plan.
i. Company and/or external critical resources, personnel, and necessary corrective actions must be specifically identified.
i. Specific responsibilities and tasks for responding to emergencies and resuming business operations must be included in the plan.
i. All applicable legal and regulatory requirements must be satisfied.
+-------------------+------------------+---------------+-------------------+------------------+
|**Availability** | **Availability** | **Scheduled** | **Recovery Time** | **Data Loss or** |
|**Classification** | **Requirements** | **Outage** | **Requirements** | **Impact Loss** |
+===================+==================+===============+===================+==================+
| High | High to | 30 minutes | 1 hour | Minimal |
| | Continuous | | | |
+-------------------+------------------+---------------+-------------------+------------------+
| | | | | |
+-------------------+------------------+---------------+-------------------+------------------+
| Medium | Standard | 2 hours | 4 hours | Some data loss |
| | Availability | | | is tolerated if |
| | | | | it results in |
| | | | | quicker |
| | | | | restoration |
+-------------------+------------------+---------------+-------------------+------------------+
| | | | | |
+-------------------+------------------+---------------+-------------------+------------------+
| Low | Limited | 4 hours | Next | Some data loss |
| | Availability | | business day | is tolerated if |
| | | | | it results in |
| | | | | quicker |
| | | | | restoration |
+-------------------+------------------+---------------+-------------------+------------------+
Table 3: Recovery Time and Data Loss Limits

View File

@@ -7,5 +7,279 @@ majorRevisions:
- date: Jun 1 2018
comment: Initial document
---
# Appendices
Appendix A: Handling of Classified Information
Appendix B: Form - Confidentiality Statement
# Purpose and Scope
a. This data classification policy defines the requirements to ensure that information within the organization is protected at an appropriate level.
a. This document applies to the entire scope of the organizations information security program. It includes all types of information, regardless of its form, such as paper or electronic documents, applications and databases, and knowledge or information that is not written.
a. This policy applies to all individuals and systems that have access to information kept by the organization.
# Background
a. This policy defines the high level objectives and implementation instructions for the organizations data classification scheme. This includes data classification levels, as well as procedures for the classification, labeling and handling of data within the organization. Confidentiality and non-disclosure agreements maintained by the organization must reference this policy.
# References
a. Risk Assessment Policy
a. Security Incident Management Policy
# Policy
a. If classified information is received from outside the organization, the person who receives the information must classify it in accordance with the rules prescribed in this policy. The person thereby will become the owner of the information.
a. If classified information is received from outside the organization and handled as part of business operations activities (e.g., customer data on provided cloud services), the information classification, as well as the owner of such information, must be made in accordance with the specifications of the respective customer service agreement and other legal requirements.
a. When classifying information, the level of confidentiality is determined by:
i. The value of the information, based on impacts identified during the risk assessment process. More information on risk assessments is defined in the Risk Assessment Policy (reference (a)).
i. Sensitivity and criticality of the information, based on the highest risk calculated for each information item during the risk assessment.
i. Legal, regulatory and contractual obligations.
+-------------------+------------------+---------------------------+---------------------------+
|**Confidentiality**| **Label** | **Classification** | **Access** |
| **Level** | | **Criteria** | **Restrictions** |
+===================+==================+===========================+============================+
| Public | For Public | Making the information | Information is available |
| | Release | public will not harm | to the public. |
| | | the organization in | |
| | | any way. | |
+-------------------+------------------+---------------------------+---------------------------+
| | | | |
+-------------------+------------------+---------------------------+---------------------------+
| Internal Use | Internal Use | Unauthorized access | Information is available |
| | | may cause minor damage | to all employees and |
| | | and/or inconvenience | authorized third parties. |
| | | to the organization. | |
+-------------------+------------------+---------------------------+---------------------------+
| | | | |
+-------------------+------------------+---------------------------+---------------------------+
| Restricted | Restricted | Unauthorized access to | Information is available |
| | | information may cause | to a specific group of |
| | | considerable damage to | employees and authorized |
| | | the business and/or | third parties. |
| | | the organization's | |
| | | reputation. | |
+-------------------+------------------+---------------------------+---------------------------+
| | | | |
+-------------------+------------------+---------------------------+---------------------------+
| Confidential |Confidential | Unauthorized access to | Information is available |
| | | information may cause | only to specific indivi- |
| | | catastrophic damage to | duals in the |
| | | business and/or the | organization. |
| | | organization's reputation.| |
+-------------------+------------------+---------------------------+---------------------------+
Table 3: Information Confidentiality Levels
&nbsp;
d. Information must be classified based on confidentiality levels as defined in Table 3.
e. Information and information system owners should try to use the lowest confidentiality level that ensures an adequate level of protection, thereby avoiding unnecessary production costs.
f. Information classified as “Restricted” or “Confidential” must be accompanied by a list of authorized persons in which the information owner specifies the names or job functions of persons who have the right to access that information.
g. Information classified as “Internal Use” must be accompanied by a list of authorized persons only if individuals outside the organization will have access to the document.
h. Information and information system owners must review the confidentiality level of their information assets every five years and assess whether the confidentiality level should be changed. Wherever possible, confidentiality levels should be lowered.
a. For cloud-based software services provided to customers, system owners under the companys control must also review the confidentiality level of their information systems after service agreement changes or after a customers formal notification. Where allowed by service agreements, confidentiality levels should be lowered.
a. Information must be labeled according to the following:
i. Paper documents: the confidentiality level is indicated on the top and bottom of each document page; it is also indicated on the front of the cover or envelope carrying such a document as well as on the filing folder in which the document is stored. If a document is not labeled, its default classification is Internal Use.
i. Electronic documents: the confidentiality level is indicated on the top and bottom of each document page. If a document is not labeled, its default classification is Internal Use.
i. Information systems: the confidentiality level in applications and databases must be indicated on the system access screen, as well as on the screen when displaying such information.
i. Electronic mail: the confidentiality level is indicated in the first line of the email body. If it is not labeled, its default classification is “Internal Use”.
i. Electronic storage media (disks, memory cards, etc.): the confidentiality level must be indicated on the top surface of the media. If it is not labeled, its default classification is “Internal Use”.
i. Information transmitted orally: the confidentiality level should be mentioned before discussing information during face-to-face communication, by telephone, or any other means of oral communication.
a. All persons accessing classified information must follow the guidelines listed in Appendix A, “Handling of Classified Information.”
a. All persons accessing classified information must complete and submit a Confidentiality Statement to their immediate supervisor or company point-of-contact. A sample Confidentiality Statement is in Appendix B.
a. Incidents related to the improper handling of classified information must be reported in accordance with the Security Incident Management Policy (reference (b)).
\pagebreak
# Appendix A: Handling of Classified Information
Information and information systems must be handled according to the following guidelines*:
a. Paper Documents
i. Internal Use
1. Only authorized persons may have access.
1. If sent outside the organization, the document must be sent as registered mail.
1. Documents may only be kept in rooms without public access.
1. Documents must be removed expeditiously from printers and fax machines.
i. Restricted
1. The document must be stored in a locked cabinet.
1. Documents may be transferred within and outside the organization only in a closed envelope.
1. If sent outside the organization, the document must be mailed with a return receipt service.
1. Documents must immediately be removed from printers and fax machines.
1. Only the document owner may copy the document.
1. Only the document owner may destroy the document.
i. Confidential
1. The document must be stored in a safe.
1. The document may be transferred within and outside the organization only by a trustworthy person in a closed and sealed envelope.
1. Faxing the document is not permitted.
1. The document may be printed only if the authorized person is standing next to the printer.
a. Electronic Documents
i. Internal Use
1. Only authorized persons may have access.
1. When documents are exchanged via unencrypted file sharing services such as FTP, they must be password protected.
1. Access to the information system where the document is stored must be protected by a strong password.
1. The screen on which the document is displayed must be automatically locked after 10 minutes of inactivity.
i. Restricted
1. Only persons with authorization for this document may access the part of the information system where this document is stored.
1. When documents are exchanged via file sharing services of any type, they must be encrypted.
1. Only the document owner may erase the document.
i. Confidential
1. The document must be stored in encrypted form.
1. The document may be stored only on servers which are controlled by the organization.
1. The document may only be shared via file sharing services that are encrypted such as HTTPS and SSH. Further, the document must be encrypted and protected with a strong password when transferred.
a. Information Systems
i. Internal Use
1. Only authorized persons may have access.
1. Access to the information system must be protected by a strong password.
1. The screen must be automatically locked after 10 minutes of inactivity.
1. The information system may be only located in rooms with controlled physical access.
i. Restricted
1. Users must log out of the information system if they have temporarily or permanently left the workplace.
1. Data must be erased only with an algorithm that ensures secure deletion.
i. Confidential
1. Access to the information system must be controlled through multi-factor authentication (MFA).
1. The information system may only be installed on servers controlled by the organization.
1. The information system may only be located in rooms with controlled physical access and identity control of people accessing the room.
a. Electronic Mail
i. Internal Use
1. Only authorized persons may have access.
1. The sender must carefully check the recipient.
1. All rules stated under “information systems” apply.
i. Restricted
1. Email must be encrypted if sent outside the organization.
i. Confidential
1. Email must be encrypted.
a. Electronic Storage Media
i. Internal Use
1. Only authorized persons may have access.
1. Media or files must be password protected.
1. If sent outside the organization, the medium must be sent as registered mail.
1. The medium may only be kept in rooms with controlled physical access.
i. Restricted
1. Media and files must be encrypted.
1. Media must be stored in a locked cabinet.
1. If sent outside the organization, the medium must be mailed with a return receipt service.
1. Only the medium owner may erase or destroy the medium.
i. Confidential
1. Media must be stored in a safe.
1. Media may be transferred within and outside the organization only by a trustworthy person and in a closed and sealed envelope.
a. Information Transmitted Orally
i. Internal Use
1. Only authorized persons may have access to information.
1. Unauthorized persons must not be present in the room when the information is communicated.
i. Restricted
1. The room must be sound-proof.
1. The conversation must not be recorded.
i. Confidential
1. Conversation conducted through electronic means must be encrypted.
1. No transcript of the conversation may be kept.
In this document, controls are implemented cumulatively, meaning that controls for any confidentiality level imply the implementation of controls defined for lower confidentiality levels - if stricter controls are prescribed for a higher confidentiality level, then only such controls are implemented.
# Coming Soon

View File

@@ -8,4 +8,188 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. The purpose of this policy is to define requirements for establishing and maintaining baseline protection standards for company software, network devices, servers, and desktops.
a. This policy applies to all users performing software development, system administration, and management of these activities within the organization. This typically includes employees and contractors, as well as any relevant external parties involved in these activities (hereinafter referred to as “users”). This policy must be made readily available to all users.
a. This policy also applies to enterprise-wide systems and applications developed by the organization or on behalf of the organization for production implementation.
# Background
a. The intent of this policy is to ensure a well-defined, secure and consistent process for managing the entire lifecycle of software and information systems, from initial requirements analysis until system decommission. The policy defines the procedure, roles, and responsibilities, for each stage of the software development lifecycle.
a. Within this policy, the software development lifecycle consists of requirements analysis, architecture and design, development, testing, deployment/implementation, operations/maintenance, and decommission. These processes may be followed in any form; in a waterfall model, it may be appropriate to follow the process linearly, while in an agile development model, the process can be repeated in an iterative fashion.
# References
a. Risk Assessment Policy
# Policy
a. The organizations Software Development Life Cycle (SDLC) includes the following phases:
i. Requirements Analysis
i. Architecture and Design
i. Testing
i. Deployment/Implementation
i. Operations/Maintenance
i. Decommission
a. During all phases of the SDLC where a system is not in production, the system must not have live data sets that contain information identifying actual people or corporate entities, actual financial data such as account numbers, security codes, routing information, or any other financially identifying data. Information that would be considered sensitive must never be used outside of production environments.
a. The following activities must be completed and/or considered during the requirements analysis phase:
i. Analyze business requirements.
i. Perform a risk assessment. More information on risk assessments is discussed in the Risk Assessment Policy (reference (a)).
i. Discuss aspects of security (e.g., confidentiality, integrity, availability) and how they might apply to this requirement.
i. Review regulatory requirements and the organizations policies, standards, procedures and guidelines.
i. Review future business goals.
i. Review current business and information technology operations.
i. Incorporate program management items, including:
1. Analysis of current system users/customers.
1. Understand customer-partner interface requirements (e.g., business-level, network).
1. Discuss project timeframe.
i. Develop and prioritize security solution requirements.
i. Assess cost and budget constraints for security solutions, including development and operations.
i. Approve security requirements and budget.
i. Make “buy vs. build” decisions for security services based on the information above.
a. The following must be completed/considered during the architecture and design phase:
i. Educate development teams on how to create a secure system.
i. Develop and/or refine infrastructure security architecture.
i. List technical and non-technical security controls.
i. Perform architecture walkthrough.
i. Create a system-level security design.
i. Create high-level non-technical and integrated technical security designs.
i. Perform a cost/benefit analysis for design components.
i. Document the detailed technical security design.
i. Perform a design review, which must include, at a minimum, technical reviews of application and infrastructure, as well as a review of high-level processes.
i. Describe detailed security processes and procedures, including: segregation of duties and segregation of development, testing and production environments.
i. Design initial end-user training and awareness programs.
i. Design a general security test plan.
i. Update the organizations policies, standards, and procedures, if appropriate.
i. Assess and document how to mitigate residual application and infrastructure vulnerabilities.
i. Design and establish separate development and test environments.
a. The following must be completed and/or considered during the development phase:
i. Set up a secure development environment (e.g., servers, storage).
i. Train infrastructure teams on installation and configuration of applicable software, if required.
i. Develop code for application-level security components.
i. Install, configure and integrate the test infrastructure.
i. Set up security-related vulnerability tracking processes.
i. Develop a detailed security test plan for current and future versions (i.e., regression testing).
i. Conduct unit testing and integration testing.
a. The following must be completed and/or considered during the testing phase:
i. Perform a code and configuration review through both static and dynamic analysis of code to identify vulnerabilities.
i. Test configuration procedures.
i. Perform system tests.
i. Conduct performance and load tests with security controls enabled.
i. Perform usability testing of application security controls.
i. Conduct independent vulnerability assessments of the system, including the infrastructure and application.
a. The following must be completed and/or considered during the deployment phase:
i. Conduct pilot deployment of the infrastructure, application and other relevant components.
i. Conduct transition between pilot and full-scale deployment.
i. Perform integrity checking on system files to ensure authenticity.
i. Deploy training and awareness programs to train administrative personnel and users in the systems security functions.
i. Require participation of at least two developers in order to conduct full-scale deployment to the production environment.
a. The following must be completed and/or considered during the operations/maintenance phase:
i. Several security tasks and activities must be routinely performed to operate and administer the system, including but not limited to:
1. Administering users and access.
1. Tuning performance.
1. Performing backups according to requirements defined in the System Availability Policy
1. Performing system maintenance (i.e., testing and applying security updates and patches).
1. Conducting training and awareness.
1. Conducting periodic system vulnerability assessments.
1. Conducting annual risk assessments.
i. Operational systems must:
1. Be reviewed to ensure that the security controls, both automated and manual, are functioning correctly and effectively.
1. Have logs that are periodically reviewed to evaluate the security of the system and validate audit controls.
1. Implement ongoing monitoring of systems and users to ensure detection of security violations and unauthorized changes.
1. Validate the effectiveness of the implemented security controls through security training as required by the Procedure For Executing Incident Response.
1. Have a software application and/or hardware patching process that is performed regularly in order to eliminate software bug and security problems being introduced into the organizations technology environment. Patches and updates must be applied within ninety (90) days of release to provide for adequate testing and propagation of software updates. Emergency, critical, break-fix, and zero-day vulnerability patch releases must be applied as quickly as possible.
a. The following must be completed and/or considered during the decommission phase:
i. Conduct unit testing and integration testing on the system after component removal.
i. Conduct operational transition for component removal/replacement.
i. Determine data retention requirements for application software and systems data.
i. Document the detailed technical security design.
i. Update the organizations policies, standards and procedures, if appropriate.
i. Assess and document how to mitigate residual application and infrastructure vulnerabilities.

View File

@@ -8,7 +8,8 @@ majorRevisions:
- date: Jun 1 2018
comment: Initial document
---
#Purpose and Scope
# Purpose and Scope
a. The purpose of this policy is to define the organizations procedures to recover Information Technology (IT) infrastructure and IT services within set deadlines in the case of a disaster or other disruptive incident. The objective of this plan is to complete the recovery of IT infrastructure and IT services within a set Recovery Time Objective (RTO).

View File

@@ -7,5 +7,76 @@ majorRevisions:
- date: Jun 1 2018
comment: Initial document
---
# Purpose and Scope
a. This policy defines organizational requirements for the use of cryptographic controls, as well as the requirements for cryptographic keys, in order to protect the confidentiality, integrity, authenticity and nonrepudiation of information.
a. This policy applies to all systems, equipment, facilities and information within the scope of the organizations information security program.
a. All employees, contractors, part-time and temporary workers, service providers, and those employed by others to perform work on behalf of the organization having to do with cryptographic systems, algorithms, or keying material are subject to this policy and must comply with it.
# Background
a. This policy defines the high level objectives and implementation instructions for the organizations use of cryptographic algorithms and keys. It is vital that the organization adopt a standard approach to cryptographic controls across all work centers in order to ensure end-to-end security, while also promoting interoperability. This document defines the specific algorithms approved for use, requirements for key management and protection, and requirements for using cryptography in cloud environments.
# Policy
a. The organization must protect individual systems or information by means of cryptographic controls as defined in Table 3:
\pagebreak
+---------------------+-------------------+----------------+--------------+
| **Name of System/** | **Cryptographic** | **Encryption** | **Key Size** |
| **Type of** | **Tool** | **Algorithm** | |
| **Information** | | | |
+=====================+===================+================+==============+
| Public Key | OpenSSL | AES-256 | 256-bit key |
| Infrastructure for | | | |
| Authentication | | | |
+---------------------+-------------------+----------------+--------------+
| | | | |
+---------------------+-------------------+----------------+--------------+
| Data Encryption | OpenSSL | AES-256 | 256-bit key |
| Keys | | | |
+---------------------+-------------------+----------------+--------------+
| | | | |
+---------------------+-------------------+----------------+--------------+
| Virtual Private | OpenSSL and | AES-256 | 256-bit key |
| Network (VPN) | OpenVPN | | |
| keys | | | |
+---------------------+-------------------+----------------+--------------+
| | | | |
+---------------------+-------------------+----------------+--------------+
| Website SSL | OpenSSL, CERT | AES-256 | 256-bit key |
| Certificate | | | |
+---------------------+-------------------+----------------+--------------+
Table 3: Cryptographic Controls
&nbsp;
b. Except where otherwise stated, keys must be managed by their owners.
c. Cryptographic keys must be protected against loss, change or destruction by applying appropriate access control mechanisms to prevent unauthorized use and backing up keys on a regular basis.
d. When required, customers of the organizations cloud-based software or platform offering must be able to obtain information regarding:
i. The cryptographic tools used to protect their information.
i. Any capabilities that are available to allow cloud service customers to apply their own cryptographic solutions.
i. The identity of the countries where the cryptographic tools are used to store or transfer cloud service customers data.
a. The use of organizationally-approved encryption must be governed in accordance with the laws of the country, region, or other regulating entity in which users perform their work. Encryption must not be used to violate any laws or regulations including import/export restrictions. The encryption used by the Company conforms to international standards and U.S. import/export requirements, and thus can be used across international boundaries for business purposes.
a. All key management must be performed using software that automatically manages access control, secure storage, backup and rotation of keys. Specifically:
i. The key management service must provide key access to specifically-designated users, with the ability to encrypt/decrypt information and generate data encryption keys.
i. The key management service must provide key administration access to specifically-designated users, with the ability to create, schedule deletion, enable/disable rotation, and set usage policies for keys.
i. The key management service must store and backup keys for the entirety of their operational lifetime.
i. The key management service must rotate keys at least once every 12 months.
# Coming Soon

View File

@@ -8,4 +8,133 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. This removable media, cloud storage and Bring Your Own Device (BYOD) policy defines the objectives, requirements and implementing instructions for storing data on removable media, in cloud environments, and on personally-owned devices, regardless of data classification level.
a. This policy applies to all information and data within the organizations information security program, as well as all removable media, cloud systems and personally-owned devices either owned or controlled by the organization.
a. This policy applies to all users of information systems within the organization. This typically includes employees and contractors, as well as any external parties that come into contact with systems and information controlled by the organization (hereinafter referred to as “users”). This policy must be made readily available to all users.
# Background
a. This policy defines the procedures for safely using removable media, cloud storage and personally-owned devices to limit data loss or exposure. Such forms of storage must be strictly controlled because of the sensitive data that can be stored on them. Because each of these storage types are inherently ephemeral or portable in nature, it is possible for the organization to lose the ability to oversee or control the information stored on them if strict security standards are not followed.
a. This document consists of three sections pertaining to removable media, cloud storage, and personally-owned devices. Each section contains requirements and implementing instructions for the registration, management, maintenance, and disposition of each type of storage.
a. Within this policy, the term sensitive information refers to information that is classified as RESTRICTED or CONFIDENTIAL in accordance with the Data Classification Policy (reference (a)).
# References
a. Data Classification Policy
a. Asset Inventory
a. Security Incident Response Policy
a. Encryption Policy
# Policy
a. *Removable Media*
i. All removable media in active use and containing data pertinent to the organization must be registered in the organizations Asset Inventory (reference (b)).
i. All removable media listed in reference (b) must be re-inventoried on a quarterly basis to ensure that it is still within the control of the organization.
1. To re-inventory an item, the owner of the removable media must check in the item with the organizations Information Security Manager (ISM).
1. The ISM must treat any removable media that cannot be located as lost, and a security incident report must be logged in accordance with the Security Incident Response Policy (reference (c)).
i. The owner of the removable media must conduct all appropriate maintenance on the item at intervals appropriate to the type of media, such as cleaning, formatting, labeling, etc.
i. The owner of the removable media, where practical, must ensure that an alternate or backup copy of the information located on the device exists.
i. Removable media must be stored in a safe place that has a reduced risk of fire or flooding damage.
i. If the storage item contains sensitive information, removable media must:
1. Be stored in a locked cabinet or drawer.
1. Store only encrypted data that is securely enciphered in accordance with the Encryption Policy (reference (d)).
i. All data on removable media devices must be erased, or the device must be destroyed, before it is reused or disposed of.
i. When removable media devices are disposed, the device owner must inform the ISM so that it can be removed from reference (b).
a. *Cloud Storage*
i. All cloud storage systems in active use and containing data pertinent to the organization must be registered in reference (b). Registration may be accomplished by manual or automated means.
i. All cloud storage systems listed in reference (b) must be re-inventoried on a quarterly basis to ensure that they are still within the control of the organization. To re-inventory an item, the owner of the cloud storage system must check in the item with the organization's Information Security Manager (ISM). Re-inventory may be accomplished by manual or automated means.
i. The owner of the cloud storage system must conduct all appropriate maintenance on the system at regular intervals to include system configuration, access control, performance monitoring, etc.
i. Data on cloud storage systems must be replicated to at least one other physical location. Depending on the cloud storage provider, this replication may be automatically configured.
i. The organization must only use cloud storage providers that can demonstrate, either through security accreditation, demonstration, tour, or other means that their facilities are secured, both physically and electronically, using best practices.
i. If the cloud storage system contains sensitive information, that information must be encrypted in accordance with reference (d).
i. Data must be erased from cloud storage systems using a technology and process that is approved by the ISM.
i. When use of a cloud storage system is discontinued, the system owner must inform the ISM so that it can be removed from reference (b).
a. *Personally-owned Devices*
i. Organizational data that is stored, transferred or processed on personally-owned devices remains under the organizations ownership, and the organization retains the right to control such data even though it is not the owner of the device.
i. The ISM is responsible for conducting overall management of personally-owned devices, to include:
1. Installation and maintenance of Mobile Device Management (MDM) software that can effectively manage, control and wipe data under the organizations control from personally-owned devices.
1. Maintain a list of job titles and/or persons authorized to use personally-owned devices for the organizations business, as well as the applications and databases that may be accessed from such devices.
1. Maintain a list of applications prohibited from use on personally-owned devices, and ensuring that device users are aware of these restrictions.
i. Personally-identifiable information (PII) may not be stored, processed or accessed at any time on a personally-owned device.
i. The following acceptable use requirements must be observed by users of personally-owned devices:
1. All organizational data must be backed up at regular intervals.
1. MDM and endpoint protection software must be installed on the device at all times.
1. Sensitive information stored on the device must be encrypted in accordance with reference (d).
1. The device must be secured using a password, pin, unlock pattern, fingerprint or equivalent security mechanism.
1. The device must only connect to secure and encrypted wireless networks.
1. When using the device outside of the organizations premises, it must not be left unattended, and if possible, physically secured.
1. When using the device in public areas, the owner must take measures to ensure that the data cannot be read or accessed by unauthorized persons.
1. Patches and updates must be installed regularly.
1. Classified information must be protected in accordance with reference (a).
1. The device owner must inform the ISM before the device is disposed of, sold, or provided to a third party for servicing.
1. It is prohibited to:
a. Allow device access for anyone except its owner.
a. Store illegal materials on the device.
a. Install unlicensed software.
a. Locally-store passwords.
a. Transfer organizational data to other devices which have not been approved by the organization.
i. The organization must reserve the right to view, edit, and/or delete any organizational information that is stored, processed or transferred on the device.
i. The organization must reserve the right to perform full deletion of all of its data on the device if it considers that necessary for the protection of company-related data, without the consent of the device owner.
i. The organization will not pay the employees (the owners of BYOD) any fee for using the device for work purposes.
i. The organization will pay for any new software that needs to be installed for company use.
i. All security breaches related to personally-owned devices must be reported immediately to the ISM.

View File

@@ -10,4 +10,50 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. The purpose of this policy is to define requirements for connecting to the organizations systems and networks from remote hosts, including personally-owned devices, in order to minimize data loss/exposure.
a. This policy applies to all users of information systems within the organization. This typically includes employees and contractors, as well as any external parties that come into contact with systems and information controlled by the organization (hereinafter referred to as “users”). This policy must be made readily accessible to all users.
# Background
a. The intent of this policy is to minimize the organizations exposure to damages which may result from the unauthorized remote use of resources, including but not limited to: the loss of sensitive, company confidential data and intellectual property; damage to the organizations public image; damage to the organizations internal systems; and fines and/or other financial liabilities incurred as a result of such losses.
a. Within this policy, the following definitions apply:
i. *Mobile computing equipment:* includes portable computers, mobile phones, smart phones, memory cards and other mobile equipment used for storage, processing and transfer of data.
i. *Remote host:* is defined as an information system, node or network that is not under direct control of the organization.
i. *Telework:* the act of using mobile computing equipment and remote hosts to perform work outside the organizations physical premises. Teleworking does not include the use of mobile phones.
# Policy
a. *Security Requirements for Remote Hosts and Mobile Computing Equipment*
i. Caution must be exercised when mobile computing equipment is placed or used in uncontrolled spaces such as vehicles, public spaces, hotel rooms, meeting places, conference centers, and other unprotected areas outside the organizations premises.
i. When using remote hosts and mobile computing equipment, users must take care that information on the device (e.g. displayed on the screen) cannot be read by unauthorized persons if the device is being used to connect to the organizations systems or work with the organizations data.
i. Remote hosts must be updated and patched for the latest security updates on at least a monthly basis.
i. Remote hosts must have endpoint protection software (e.g. malware scanner) installed and updated at all times.
i. Persons using mobile computing equipment off-premises are responsible for regular backups of organizational data that resides on the device.
i. Access to the organizations systems must be done through an encrypted and authenticated VPN connection with multi-factor authentication enabled. All users requiring remote access must be provisioned with VPN credentials from the organizations information technology team. VPN keys must be rotated at least twice per year. Revocation of VPN keys must be included in the Offboarding Policy.
i. Information stored on mobile computing equipment must be encrypted using hard drive full disk encryption.
a. *Security Requirements for Telework*
i. Employees must be specifically authorized for telework in writing from their hiring manager.
i. Only the device's assigned owner is permitted to use remote nodes and mobile computing equipment. Unauthorized users (such as others living or working at the location where telework is performed) are not permitted to use such devices.
i. Devices must be authorized using certificates.
i. Users performing telework are responsible for the appropriate configuration of the local network used for connecting to the Internet at their telework location.
i. Users performing telework must protect the organizations intellectual property rights, either for software or other materials that are present on remote nodes and mobile computing equipment.

View File

@@ -8,4 +8,130 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. The purpose of this policy is to define the methodology for the assessment and treatment of information security risks within the organization, and to define the acceptable level of risk as set by the organizations leadership.
a. Risk assessment and risk treatment are applied to the entire scope of the organizations information security program, and to all assets which are used within the organization or which could have an impact on information security within it.
a. This policy applies to all employees of the organization who take part in risk assessment and risk treatment.
# Background
a. A key element of the organizations information security program is a holistic and systematic approach to risk management. This policy defines the requirements and processes for the organization to identify information security risks. The process consists of four parts: identification of the organizations assets, as well as the threats and vulnerabilities that apply; assessment of the likelihood and consequence (risk) of the threats and vulnerabilities being realized, identification of treatment for each unacceptable risk, and evaluation of the residual risk after treatment.
# References
a. Risk Assessment Report Template
# Policy
a. *Risk Assessment*
i. The risk assessment process includes the identification of threats and vulnerabilities having to do with company assets.
i. The first step in the risk assessment is to identify all assets within the scope of the information security program; in other words, all assets which may affect the confidentiality, integrity, and/or availability of information in the organization. Assets may include documents in paper or electronic form, applications, databases, information technology equipment, infrastructure, and external/outsourced services and processes. For each asset, an owner must be identified.
i. The next step is to identify all threats and vulnerabilities associated with each asset. Threats and vulnerabilities must be listed in a risk assessment table. Each asset may be associated with multiple threats, and each threat may be associated with multiple vulnerabilities. A sample risk assessment table is provided as part of the Risk Assessment Report Template (reference (a)).
i. For each risk, an owner must be identified. The risk owner and the asset owner may be the same individual.
i. Once risk owners are identified, they must assess:
1. Consequences for each combination of threats and vulnerabilities for an individual asset if such a risk materializes.
1. Likelihood of occurrence of such a risk (i.e. the probability that a threat will exploit the vulnerability of the respective asset).
1. Criteria for determining consequence and likelihood are defined in Tables 3 and 4.
i. The risk level is calculated by adding the consequence score and the likelihood score.
+-----------------+-----------------+--------------------------------------------------------------+
| **Consequence** | **Consequence** | **Description** |
| **Level** | **Score** | |
+=================+=================+==============================================================+
| Low | 0 | Loss of confidentiality, integrity, or availability will not |
| | | affect the organization's cash flow, legal, or contractual |
| | | obligations, or reputation. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
| Moderate | 1 | Loss of confidentiality, integrity, or availability may incur|
| | | financial cost and has low or moderate impact on the |
| | | organization's legal or contractual obligations and/or |
| | | reputation. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
| High            | 2               | Loss of confidentiality, integrity, or availability will     |
|                 |                 | have immediate and/or considerable impact on the             |
|                 |                 | organization's cash flow, operations, legal and contractual  |
|                 |                 | obligations, and/or reputation.                              |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
Table 3: Description of Consequence Levels and Criteria
+-----------------+-----------------+--------------------------------------------------------------+
| **Likelihood** | **Likelihood** | **Description** |
| **Level** | **Score** | |
+=================+=================+==============================================================+
| Low | 0 | Either existing security controls are strong and have so far |
| | | provided an adequate level of protection, or the probability |
| | | of the risk being realized is extremely low. No new incidents|
| | | are expected in the future. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
| Moderate        | 1               | Either existing security controls have mostly provided an    |
|                 |                 | adequate level of protection or the probability of the risk  |
|                 |                 | being realized is moderate. Some minor incidents may have    |
|                 |                 | occurred. New incidents are possible, but not highly likely. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
| High            | 2               | Either existing security controls are not in place or        |
|                 |                 | ineffective; there is a high probability of the risk being   |
|                 |                 | realized. Incidents have a high likelihood of occurring in   |
|                 |                 | the future.                                                  |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
Table 4: Description of Likelihood Levels and Criteria
&nbsp;
b. *Risk Acceptance Criteria*
i. Risk values 0 through 2 are considered to be acceptable risks.
i. Risk values 3 and 4 are considered to be unacceptable risks. Unacceptable risks must be treated.
c. *Risk Treatment*
i. Risk treatment is implemented through the Risk Treatment Table. All risks from the Risk Assessment Table must be copied to the Risk Treatment Table for disposition, along with treatment options and residual risk. A sample Risk Treatment Table is provided in reference (a).
i. As part of this risk treatment process, the CEO and/or other company managers shall determine objectives for mitigating or treating risks. All unacceptable risks must be treated. For continuous improvement purposes, company managers may also opt to treat other risks for company assets, even if their risk score is deemed to be acceptable.
i. Treatment options for risks include the following options:
1. Selection or development of security control(s).
1. Transferring the risks to a third party; for example, by purchasing an insurance policy or signing a contract with suppliers or partners.
1. Avoiding the risk by discontinuing the business activity that causes such risk.
1. Accepting the risk; this option is permitted only if the selection of other risk treatment options would cost more than the potential impact of the risk being realized.
i. After selecting a treatment option, the risk owner should estimate the new consequence and likelihood values after the planned controls are implemented.
a. *Regular Reviews of Risk Assessment and Risk Treatment*
i. The Risk Assessment Table and Risk Treatment Table must be updated when newly identified risks are identified. At a minimum, this update and review shall be conducted once per year. It is highly recommended that the Risk Assessment and Risk Treatment Table be updated when significant changes occur to the organization, technology, business objectives, or business environment.
a. *Reporting*
i. The results of risk assessment and risk treatment, and all subsequent reviews, shall be documented in a Risk Assessment Report.

View File

@@ -8,4 +8,40 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. This policy defines the rules for relationships with the organization's Information Technology (IT) vendors and partners.
a. This policy applies to all IT vendors and partners who have the ability to impact the confidentiality, integrity, and availability of the organization's technology and sensitive information, or who are within the scope of the organization's information security program.
a. This policy applies to all employees and contractors that are responsible for the management and oversight of IT vendors and partners of the organization.
# Background
a. The overall security of the organization is highly dependent on the security of its contractual relationships with its IT suppliers and partners. This policy defines requirements for effective management and oversight of such suppliers and partners from an information security perspective. The policy prescribes minimum standards a vendor must meet from an information security standpoint, including security clauses, risk assessments, service level agreements, and incident management.
# References
a. Information Security Policy
a. Security Incident Response Policy
# Policy
a. IT vendors are prohibited from accessing the organization's information security assets until a contract containing security controls is agreed to and signed by the appropriate parties.
a. All IT vendors must comply with the security policies defined and derived from the Information Security Policy (reference (a)).
a. All security incidents by IT vendors or partners must be documented in accordance with the organization's Security Incident Response Policy (reference (b)) and immediately forwarded to the Information Security Manager (ISM).
a. The organization must adhere to the terms of all Service Level Agreements (SLAs) entered into with IT vendors. As terms are updated, and as new ones are entered into, the organization must implement any changes or controls needed to ensure it remains in compliance.
a. Before entering into a contract and gaining access to the parent organization's information systems, IT vendors must undergo a risk assessment.
i. Security risks related to IT vendors and partners must be identified during the risk assessment process.
i. The risk assessment must identify risks related to information and communication technology, as well as risks related to IT vendor supply chains, to include sub-suppliers.
a. IT vendors and partners must ensure that organizational records are protected, safeguarded, and disposed of securely. The organization strictly adheres to all applicable legal, regulatory and contractual requirements regarding the collection, processing, and transmission of sensitive data such as Personally-Identifiable Information (PII).
a. The organization may choose to audit IT vendors and partners to ensure compliance with applicable security policies, as well as legal, regulatory and contractual obligations.

View File

@@ -2,4 +2,10 @@ id: "offboard"
name: "Offboard User"
---
# Coming Soon
Resolve this ticket by executing the following steps:
- [ ] Immediately suspend user in SSO
- [ ] Append HR termination request e-mail to this ticket
- [ ] Look up manually-provisioned applications for this role or user
- [ ] Validate access revocation in each
- [ ] Append confirmation or revocation to this ticket

View File

@@ -2,4 +2,11 @@ id: "onboard"
name: "Onboard New User"
---
# Coming Soon
Resolve this ticket by executing the following steps:
- [ ] Append HR add request e-mail to this ticket
- [ ] Proactively validate role assignment with manager (see HR request e-mail)
- [ ] Add user to default group for the specified role
- [ ] Provision any manually-provisioned applications by role
- [ ] Append manual provisioning confirmation to this ticket
- [ ] Proactively confirm with new user that they can access all provisioned systems

View File

@@ -1,6 +1,15 @@
id: "patch"
name: "Apply OS patches"
cron: "0 0 1 * * *"
cron: "0 0 0 15 * *"
---
# Coming Soon
# OS Patch Procedure
Resolve this ticket by executing the following steps:
- [ ] Pull the latest scripts from the Ops repository
- [ ] Execute `ENV=staging patch-all.sh`
- [ ] Inspect output
- [ ] Errors? Investigate and resolve
- [ ] Execute `ENV=production patch-all.sh`
- [ ] Attach log output to this ticket

View File

@@ -1,6 +1,40 @@
id: "workstation"
name: "Collect Workstation Details"
cron: "0 0 * * * *"
cron: "0 0 0 15 4 *"
---
# Coming Soon
Resolve this ticket by executing the following steps:
- [ ] Send the communications below
- [ ] For any email replies, attach content to this ticket
- [ ] Validate responses are received from each
```
To: Desktop support
Subject: Annual workstation inventory
Please attach the current workstation inventory to the following ticket: [REPLACE WITH URL TO THIS TICKET]
The workstation inventory shall include the following fields:
* Serial number
* Custodian
* Full disk encryption status
* Malware protection status
```
```
To: Outsourced Call Center IT
Subject: Annual workstation inventory
As part of our ongoing compliance efforts and per our services agreement, we require a current inventory of workstations in use in the service of our account.
Please respond to this message with the current inventory.
The workstation inventory shall include the following fields:
* Serial number
* Custodian
* Full disk encryption status
* Malware protection status
```

49
go.mod Normal file
View File

@@ -0,0 +1,49 @@
module github.com/strongdm/comply
go 1.12
require (
github.com/Clever/gitsem v1.0.4
github.com/Microsoft/go-winio v0.4.14 // indirect
github.com/aktau/github-release v0.8.1
github.com/andygrunwald/go-jira v1.12.0
github.com/containous/go-bindata v1.0.0
github.com/davecgh/go-spew v1.1.1
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/docker v1.13.1
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/elazarl/go-bindata-assetfs v1.0.1
github.com/fatih/color v1.9.0
github.com/fatih/structs v1.1.0 // indirect
github.com/github-release/github-release v0.8.1 // indirect
github.com/gohugoio/hugo v0.75.0
github.com/google/go-github v17.0.0+incompatible
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/go-retryablehttp v0.6.7 // indirect
github.com/inconshreveable/log15 v0.0.0-20200109203555-b30bc20e4fd1 // indirect
github.com/jcelliott/lumber v0.0.0-20160324203708-dd349441af25 // indirect
github.com/kevinburke/rest v0.0.0-20200429221318-0d2892b400f8 // indirect
github.com/lunixbochs/vtclean v1.0.0 // indirect
github.com/manifoldco/promptui v0.7.0
github.com/mattn/go-colorable v0.1.7 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/nanobox-io/golang-scribble v0.0.0-20190309225732-aa3e7c118975
github.com/olekukonko/tablewriter v0.0.4
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/pkg/errors v0.9.1
github.com/robfig/cron v1.2.0
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect
github.com/trivago/tgo v1.0.7 // indirect
github.com/urfave/cli v1.22.4
github.com/voxelbrain/goptions v0.0.0-20180630082107-58cddc247ea2 // indirect
github.com/xanzy/go-gitlab v0.30.1
github.com/yosssi/ace v0.0.5
golang.org/x/net v0.0.0-20200904194848-62affa334b73 // indirect
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 // indirect
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
gopkg.in/blang/semver.v1 v1.1.0 // indirect
gopkg.in/yaml.v2 v2.3.0
)

843
go.sum Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -6,15 +6,24 @@ import (
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/strongdm/comply/internal/config"
"github.com/strongdm/comply/internal/gitlab"
"github.com/strongdm/comply/internal/jira"
"github.com/strongdm/comply/internal/plugin/github"
"github.com/urfave/cli"
)
@@ -40,18 +49,20 @@ func newApp() *cli.App {
app.Usage = "policy compliance toolkit"
app.Commands = []cli.Command{
initCommand,
beforeCommand(initCommand, notifyVersion),
}
app.Commands = append(app.Commands, beforeCommand(buildCommand, projectMustExist))
app.Commands = append(app.Commands, beforeCommand(procedureCommand, projectMustExist))
app.Commands = append(app.Commands, beforeCommand(schedulerCommand, projectMustExist))
app.Commands = append(app.Commands, beforeCommand(serveCommand, projectMustExist))
app.Commands = append(app.Commands, beforeCommand(syncCommand, projectMustExist))
app.Commands = append(app.Commands, beforeCommand(todoCommand, projectMustExist))
app.Commands = append(app.Commands, beforeCommand(buildCommand, projectMustExist, notifyVersion))
app.Commands = append(app.Commands, beforeCommand(procedureCommand, projectMustExist, notifyVersion))
app.Commands = append(app.Commands, beforeCommand(schedulerCommand, projectMustExist, notifyVersion))
app.Commands = append(app.Commands, beforeCommand(serveCommand, projectMustExist, notifyVersion))
app.Commands = append(app.Commands, beforeCommand(syncCommand, projectMustExist, notifyVersion))
app.Commands = append(app.Commands, beforeCommand(todoCommand, projectMustExist, notifyVersion))
// Plugins
github.Register()
jira.Register()
gitlab.Register()
return app
}
@@ -96,7 +107,157 @@ func ticketingMustBeConfigured(c *cli.Context) error {
return nil
}
func dockerMustExist(c *cli.Context) error {
// notifyVersion asynchronously checks the stable release channel and, with
// ~10% probability, prints a notice to stderr when a newer version of comply
// is available. It always returns nil: the check is strictly best-effort and
// must never break a CLI invocation.
func notifyVersion(c *cli.Context) error {
	go func() {
		defer func() {
			recover() // suppress any panic; this check is best-effort
		}()
		r, err := http.Get("http://comply-releases.s3.amazonaws.com/channel/stable/VERSION")
		if err != nil {
			// fail silently
			return
		}
		defer r.Body.Close()

		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			// fail silently
			return
		}

		version := strings.TrimSpace(string(body))

		// only act when the channel file holds a plausible (numeric) version
		firstRune, _ := utf8.DecodeRuneInString(version)
		if unicode.IsDigit(firstRune) && version != Version {
			// notify only once every ~10 invocations to avoid nagging
			if rand.Intn(10) == 0 {
				fmt.Fprintf(os.Stderr, "a new version of comply is available")
			}
		}
	}()

	return nil
}
// pandocMustExist verifies that documents can be rendered via either a local
// pandoc/pdflatex install or Docker, and records the result in config so
// later build steps know which backend to use. When neither backend is
// usable it prints a diagnostic checklist and returns an error.
func pandocMustExist(c *cli.Context) error {
	eitherMustExistErr := fmt.Errorf("\n\nPlease install either Docker or the pandoc package and re-run `%s`. Find OS-specific pandoc installation instructions at: [TODO]", c.Command.Name)

	pandocExistErr, found, goodVersion, pdfLatex := pandocBinaryMustExist(c)
	dockerExistErr, inPath, isRunning := dockerMustExist(c)

	// remember which renderers are available for the rest of this run
	config.SetPandoc(pandocExistErr == nil, dockerExistErr == nil)

	// check renders a pass/fail mark for the diagnostic checklist below
	check := func(b bool) string {
		if b {
			return "✔"
		} else {
			return "✖"
		}
	}

	if pandocExistErr != nil && dockerExistErr != nil {
		fmt.Printf(`
[%s] pandoc binary installed and in PATH
[%s] pandoc version compatible
[%s] pdflatex binary installed and in PATH
[%s] docker binary installed
[%s] docker running
`, check(found), check(goodVersion), check(pdfLatex), check(inPath), check(isRunning))

		return eitherMustExistErr
	}

	// if we don't have pandoc, but we do have docker, execute a pull
	if (pandocExistErr != nil && dockerExistErr == nil) || config.WhichPandoc() == config.UseDocker {
		dockerPull(c)
	}

	return nil
}
// pandocBinaryMustExist probes the local pandoc toolchain. It reports whether
// the pandoc binary is on PATH, whether its version is 2.1 or newer, and
// whether pdflatex (needed for PDF output) is present. e is non-nil when any
// of those checks fail.
func pandocBinaryMustExist(c *cli.Context) (e error, found, goodVersion, pdfLatex bool) {
	cmd := exec.Command("pandoc", "-v")
	outputRaw, err := cmd.Output()

	e = nil
	found = false
	goodVersion = false
	pdfLatex = false

	if err != nil {
		e = errors.Wrap(err, "error calling pandoc")
	} else {
		found = true
		goodVersion = true
		output := strings.TrimSpace(string(outputRaw))
		versionErr := errors.New("cannot determine pandoc version")
		if !strings.HasPrefix(output, "pandoc") {
			e = versionErr
			goodVersion = false
		} else {
			// first output line looks like "pandoc 2.9.2.1"; capture major/minor
			re := regexp.MustCompile(`pandoc (\d+)\.(\d+)`)
			result := re.FindStringSubmatch(output)
			if len(result) != 3 {
				e = versionErr
				goodVersion = false
			} else {
				major, err := strconv.Atoi(result[1])
				if err != nil {
					e = versionErr
					goodVersion = false
				}
				minor, err := strconv.Atoi(result[2])
				if err != nil {
					e = versionErr
					goodVersion = false
				}
				// require pandoc >= 2.1; the previous check
				// (major < 2 || minor < 1) wrongly rejected any
				// major version >= 2 whose minor was 0, e.g. 3.0
				if major < 2 || (major == 2 && minor < 1) {
					e = errors.New("pandoc 2.1 or greater required")
					goodVersion = false
				}
			}
		}
	}

	// pdflatex must also be present for PDF rendering
	cmd = exec.Command("pdflatex", "--version")
	outputRaw, err = cmd.Output()
	if err != nil {
		e = errors.Wrap(err, "error calling pdflatex")
	} else if !strings.Contains(string(outputRaw), "TeX") {
		e = errors.New("pdflatex is required")
	} else {
		pdfLatex = true
	}

	return e, found, goodVersion, pdfLatex
}
// dockerMustExist reports whether the docker CLI is on PATH and whether a
// Docker daemon is reachable. e is non-nil when the daemon cannot be used.
func dockerMustExist(c *cli.Context) (e error, inPath, isRunning bool) {
	dockerErr := fmt.Errorf("Docker must be available in order to run `%s`", c.Command.Name)

	// "docker --version" succeeding is our proxy for the binary being in PATH
	inPath = true
	if _, err := exec.Command("docker", "--version").Output(); err != nil {
		inPath = false
	}

	// daemon reachability: build an env-configured client and ping it
	dockerClient, err := client.NewEnvClient()
	if err != nil {
		return dockerErr, inPath, false
	}
	if _, err = dockerClient.Ping(context.Background()); err != nil {
		return dockerErr, inPath, false
	}

	return nil, inPath, true
}
func dockerPull(c *cli.Context) error {
dockerErr := fmt.Errorf("Docker must be available in order to run `%s`", c.Command.Name)
ctx := context.Background()
@@ -146,12 +307,17 @@ func dockerMustExist(c *cli.Context) error {
}
func cleanContainers(c *cli.Context) error {
dockerErr := fmt.Errorf("Docker must be available in order to run `%s`", c.Command.Name)
ctx := context.Background()
cli, err := client.NewEnvClient()
if err != nil {
return dockerErr
// no Docker? nothing to clean.
return nil
}
_, err = cli.Ping(ctx)
if err != nil {
// no Docker? nothing to clean.
return nil
}
containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})

View File

@@ -11,7 +11,7 @@ var buildCommand = cli.Command{
ShortName: "b",
Usage: "generate a static website summarizing the compliance program",
Action: buildAction,
Before: beforeAll(dockerMustExist, cleanContainers),
Before: beforeAll(pandocMustExist, cleanContainers),
}
func buildAction(c *cli.Context) error {

View File

@@ -100,7 +100,7 @@ func initAction(c *cli.Context) error {
chooser = promptui.Select{
Label: "Ticket System",
Items: []string{"GitHub", "Jira", "None"},
Items: []string{"GitHub", "Jira", "GitLab", "None"},
}
choice, _, err = chooser.Run()
@@ -116,8 +116,9 @@ func initAction(c *cli.Context) error {
case 0:
ticketing = model.GitHub
case 1:
fmt.Println("\nHello Jira user! The Jira ticketing plugin is currently in development, please join us on Slack for a status update.")
ticketing = model.NoTickets
ticketing = model.Jira
case 2:
ticketing = model.GitLab
default:
ticketing = model.NoTickets
}

View File

@@ -3,6 +3,7 @@ package cli
import (
"fmt"
"github.com/strongdm/comply/internal/config"
"github.com/strongdm/comply/internal/model"
"github.com/urfave/cli"
)
@@ -13,7 +14,7 @@ var procedureCommand = cli.Command{
Usage: "create ticket by procedure ID",
ArgsUsage: "procedureID",
Action: procedureAction,
Before: projectMustExist,
Before: beforeAll(projectMustExist, ticketingMustBeConfigured),
}
func procedureAction(c *cli.Context) error {
@@ -28,14 +29,22 @@ func procedureAction(c *cli.Context) error {
procedureID := c.Args().First()
ts, err := config.Config().TicketSystem()
if err != nil {
return cli.NewExitError("error in ticket system configuration", 1)
}
tp := model.GetPlugin(model.TicketSystem(ts))
for _, procedure := range procedures {
if procedure.ID == procedureID {
// TODO: don't hardcode GH
tp := model.GetPlugin(model.GitHub)
tp.Create(&model.Ticket{
err = tp.Create(&model.Ticket{
Name: procedure.Name,
Body: fmt.Sprintf("%s\n\n\n---\nProcedure-ID: %s", procedure.Body, procedure.ID),
}, []string{"comply", "comply-procedure"})
if err != nil {
return err
}
return nil
}
}

View File

@@ -7,10 +7,17 @@ import (
)
var serveCommand = cli.Command{
Name: "serve",
Usage: "live updating version of the build command",
Name: "serve",
Usage: "live updating version of the build command",
Flags: []cli.Flag{
cli.IntFlag{
Name: "port",
Value: 4000,
Destination: &render.ServePort,
},
},
Action: serveAction,
Before: beforeAll(dockerMustExist, cleanContainers),
Before: beforeAll(pandocMustExist, cleanContainers),
}
func serveAction(c *cli.Context) error {

View File

@@ -1,6 +1,7 @@
package cli
import (
"github.com/strongdm/comply/internal/config"
"github.com/strongdm/comply/internal/model"
"github.com/urfave/cli"
)
@@ -13,8 +14,12 @@ var syncCommand = cli.Command{
}
func syncAction(c *cli.Context) error {
// TODO: unhardcode plugin
tp := model.GetPlugin(model.GitHub)
ts, err := config.Config().TicketSystem()
if err != nil {
return cli.NewExitError("error in ticket system configuration", 1)
}
tp := model.GetPlugin(model.TicketSystem(ts))
tickets, err := tp.FindByTagName("comply")
if err != nil {
return err

View File

@@ -1,6 +1,7 @@
package config
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
@@ -10,15 +11,54 @@ import (
var projectRoot string
var dockerAvailable, pandocAvailable bool
const (
Jira = "jira"
GitHub = "github"
GitLab = "gitlab"
NoTickets = "none"
)
const (
// UseDocker invokes pandoc within Docker
UseDocker = "docker"
// UsePandoc invokes pandoc directly
UsePandoc = "pandoc"
)
// SetProjectRoot is used by the test suite.
func SetProjectRoot(dir string) {
projectRoot = dir
}
type Project struct {
Name string `yaml:"name"`
FilePrefix string `yaml:"filePrefix"`
Tickets map[string]interface{} `yaml:"tickets"`
Name string `yaml:"name"`
Pandoc string `yaml:"pandoc,omitempty"`
FilePrefix string `yaml:"filePrefix"`
Tickets map[string]interface{} `yaml:"tickets"`
ApprovedBranch string `yaml:"approvedBranch"`
}
// SetPandoc records pandoc availability during initialization
func SetPandoc(pandoc bool, docker bool) {
pandocAvailable = pandoc
dockerAvailable = docker
}
// WhichPandoc indicates which pandoc invocation path should be used
func WhichPandoc() string {
cfg := Config()
if cfg.Pandoc == UsePandoc {
return UsePandoc
}
if cfg.Pandoc == UseDocker {
return UseDocker
}
if pandocAvailable {
return UsePandoc
}
return UseDocker
}
// YAML is the parsed contents of ProjectRoot()/config.yml.
@@ -42,14 +82,14 @@ func Exists() bool {
}
// Config is the parsed contents of ProjectRoot()/config.yml.
func Config() Project {
func Config() *Project {
p := Project{}
cfgBytes, err := ioutil.ReadFile(filepath.Join(ProjectRoot(), "comply.yml"))
if err != nil {
panic("unable to load config.yml: " + err.Error())
}
yaml.Unmarshal(cfgBytes, &p)
return p
return &p
}
// ProjectRoot is the fully-qualified path to the root directory.
@@ -64,3 +104,29 @@ func ProjectRoot() string {
return projectRoot
}
// TicketSystem returns which ticket system (github, jira, gitlab, or none)
// is configured in the project's `tickets` map. It errors when more than one
// system is configured, or when the single configured key is unrecognized.
// An absent/empty tickets block yields NoTickets with no error.
func (p *Project) TicketSystem() (string, error) {
	if len(p.Tickets) > 1 {
		return NoTickets, errors.New("multiple ticket systems configured")
	}

	// at most one key is present; identify it
	for k := range p.Tickets {
		switch k {
		case GitHub:
			return GitHub, nil
		case Jira:
			return Jira, nil
		case GitLab:
			return GitLab, nil
		case NoTickets:
			return NoTickets, nil
		default:
			// explicit error for this case
			return "", errors.New("unrecognized ticket system configured")
		}
	}

	// no ticket block configured
	return NoTickets, nil
}

218
internal/gitlab/gitlab.go Normal file
View File

@@ -0,0 +1,218 @@
package gitlab
import (
"fmt"
"strconv"
"sync"
"github.com/pkg/errors"
"github.com/strongdm/comply/internal/model"
"github.com/xanzy/go-gitlab"
)
const (
cfgDomain = "domain"
cfgToken = "token"
cfgRepo = "repo"
)
var prompts = map[string]string{
cfgDomain: "Fully Qualified GitLab Domain",
cfgToken: "GitLab Token",
cfgRepo: "GitLab Repository",
}
// Prompts are human-readable configuration element names
func (g *gitlabPlugin) Prompts() map[string]string {
return prompts
}
// Register registers the GitLab ticketing plugin with the model package.
func Register() {
	model.Register(model.GitLab, &gitlabPlugin{})
}
type gitlabPlugin struct {
domain string
token string
reponame string
clientMu sync.Mutex
client *gitlab.Client
}
// api lazily constructs and caches the shared go-gitlab client, guarded by
// clientMu so concurrent callers get a single instance.
// NOTE(review): this two-argument NewClient signature exists only in
// github.com/xanzy/go-gitlab <= v0.31.0 (go.mod pins v0.30.1); later
// releases changed NewClient to return (client, error) — confirm before
// upgrading the dependency.
func (g *gitlabPlugin) api() *gitlab.Client {
	g.clientMu.Lock()
	defer g.clientMu.Unlock()

	if g.client == nil {
		// get go-gitlab client
		gl := gitlab.NewClient(nil, g.token)
		gl.SetBaseURL(g.domain)
		g.client = gl
	}
	return g.client
}
func (g *gitlabPlugin) Get(ID string) (*model.Ticket, error) {
return nil, nil
}
func (g *gitlabPlugin) Configured() bool {
return g.reponame != "" && g.token != ""
}
func (g *gitlabPlugin) Links() model.TicketLinks {
links := model.TicketLinks{}
links.AuditAll = fmt.Sprintf("%s/%s/issues?scope=all&utf8=✓&state=all&label_name[]=comply-audit", g.domain, g.reponame)
links.AuditOpen = fmt.Sprintf("%s/%s/issues?scope=all&utf8=✓&state=opened&label_name[]=comply-audit", g.domain, g.reponame)
links.ProcedureAll = fmt.Sprintf("%s/%s/issues?scope=all&utf8=✓&state=all&label_name[]=comply-procedure", g.domain, g.reponame)
links.ProcedureOpen = fmt.Sprintf("%s/%s/issues?scope=all&utf8=✓&state=opened&label_name[]=comply-procedure", g.domain, g.reponame)
return links
}
func (g *gitlabPlugin) Configure(cfg map[string]interface{}) error {
var err error
if g.domain, err = getCfg(cfg, cfgDomain); err != nil {
return err
}
if g.token, err = getCfg(cfg, cfgToken); err != nil {
return err
}
if g.reponame, err = getCfg(cfg, cfgRepo); err != nil {
return err
}
return nil
}
// getCfg extracts the string value stored under k in the plugin's raw
// configuration map, returning an error when the key is absent or the value
// is not a string.
func getCfg(cfg map[string]interface{}, k string) (string, error) {
	raw, present := cfg[k]
	if !present {
		return "", errors.New("Missing key: " + k)
	}

	s, isString := raw.(string)
	if !isString {
		return "", errors.New("Malformatted key: " + k)
	}
	return s, nil
}
// getProjectIssues fetches every page of issues for the configured repo,
// following GitLab's pagination metadata until the final page is consumed.
func getProjectIssues(g *gitlabPlugin, options *gitlab.ListProjectIssuesOptions) ([]*gitlab.Issue, error) {
	issues := []*gitlab.Issue{}
	options.Page = 1
	for {
		pageIssues, resp, err := g.api().Issues.ListProjectIssues(g.reponame, options)
		if err != nil {
			return nil, errors.Wrap(err, "error retreiving issues from gitlab")
		}
		issues = append(issues, pageIssues...)
		if resp.CurrentPage >= resp.TotalPages {
			return issues, nil
		}
		// advance to the next page reported by the server
		options.Page = resp.NextPage
	}
}
func (g *gitlabPlugin) FindOpen() ([]*model.Ticket, error) {
options := &gitlab.ListProjectIssuesOptions{
State: gitlab.String("opened"),
}
issues, err := getProjectIssues(g, options)
if err != nil {
return nil, errors.Wrap(err, "error during FindOpen")
}
return toTickets(issues), nil
}
func (g *gitlabPlugin) FindByTag(name, value string) ([]*model.Ticket, error) {
panic("not implemented")
}
func (g *gitlabPlugin) FindByTagName(name string) ([]*model.Ticket, error) {
options := &gitlab.ListProjectIssuesOptions{
State: gitlab.String("all"),
Labels: []string{name},
}
issues, err := getProjectIssues(g, options)
if err != nil {
return nil, errors.Wrap(err, "error during FindByTagName")
}
return toTickets(issues), nil
}
func (g *gitlabPlugin) LinkFor(t *model.Ticket) string {
panic("not implemented")
}
// Create opens a new GitLab issue for ticket, applying the given labels
// (e.g. "comply", "comply-procedure") so the issue can later be found by
// FindByTagName. Returns any error from the GitLab API.
func (g *gitlabPlugin) Create(ticket *model.Ticket, labels []string) error {
	l := gitlab.Labels(labels)
	options := &gitlab.CreateIssueOptions{
		Title:       gitlab.String(ticket.Name),
		Description: gitlab.String(ticket.Body),
		Labels:      &l,
	}
	_, _, err := g.api().Issues.CreateIssue(g.reponame, options)
	return err
}
// toTickets converts a slice of GitLab issues into comply model tickets.
func toTickets(issues []*gitlab.Issue) []*model.Ticket {
	var tickets []*model.Ticket
	for _, issue := range issues {
		tickets = append(tickets, toTicket(issue))
	}
	return tickets
}
// toTicket converts a single GitLab issue into a comply model.Ticket,
// mapping recognized issue labels onto boolean ticket attributes.
func toTicket(i *gitlab.Issue) *model.Ticket {
	t := &model.Ticket{Attributes: make(map[string]interface{})}
	// NOTE(review): uses the global issue ID, not the per-project IID shown
	// in GitLab URLs — confirm this is the identifier comply expects.
	t.ID = strconv.Itoa(i.ID)
	t.Name = i.Title
	t.Body = i.Description
	t.CreatedAt = i.CreatedAt
	t.State = toState(i.State)

	for _, l := range i.Labels {
		// legacy short label names are mapped to the comply-* attributes
		if l == "audit" {
			t.SetBool("comply-audit")
		}
		if l == "procedure" {
			t.SetBool("comply-procedure")
		}

		// seems redundant, but fixes a bug the other two labels introduce
		// whereby open comply tickets aren't properly accounted for in the UI
		if l == "comply-audit" {
			t.SetBool("comply-audit")
		}
		if l == "comply-procedure" {
			t.SetBool("comply-procedure")
		}
	}
	return t
}
// toState maps a GitLab issue state string onto the comply ticket state;
// anything other than "closed" is treated as open.
func toState(state string) model.TicketState {
	if state == "closed" {
		return model.Closed
	}
	return model.Open
}
// ss safely dereferences a possibly-nil string pointer, mapping nil to "".
func ss(s *string) string {
	if s != nil {
		return *s
	}
	return ""
}

View File

@@ -0,0 +1,9 @@
package gitlab
import (
"testing"
)
func TestGitlab(t *testing.T) {
createOne()
}

193
internal/jira/jira.go Normal file
View File

@@ -0,0 +1,193 @@
package jira
import (
"fmt"
"sync"
"time"
"github.com/pkg/errors"
"github.com/strongdm/comply/internal/model"
jira "github.com/andygrunwald/go-jira"
)
const (
cfgUsername = "username"
cfgPassword = "password"
cfgURL = "url"
cfgProject = "project"
cfgTaskType = "taskType"
)
var prompts = map[string]string{
cfgUsername: "Jira Username",
cfgPassword: "Jira Password",
cfgURL: "Jira URL",
cfgProject: "Jira Project Code",
cfgTaskType: "Jira Task Type",
}
// Prompts are human-readable configuration element names
func (j *jiraPlugin) Prompts() map[string]string {
return prompts
}
// Register registers the Jira ticketing plugin with the model package.
func Register() {
	model.Register(model.Jira, &jiraPlugin{})
}
type jiraPlugin struct {
username string
password string
url string
project string
taskType string
clientMu sync.Mutex
client *jira.Client
}
func (j *jiraPlugin) api() *jira.Client {
j.clientMu.Lock()
defer j.clientMu.Unlock()
if j.client == nil {
tp := jira.BasicAuthTransport{
Username: j.username,
Password: j.password,
}
client, _ := jira.NewClient(tp.Client(), j.url)
j.client = client
}
return j.client
}
func (j *jiraPlugin) Get(ID string) (*model.Ticket, error) {
return nil, nil
}
func (j *jiraPlugin) Configured() bool {
return j.username != "" && j.password != "" && j.url != "" && j.project != "" && j.taskType != ""
}
func (j *jiraPlugin) Links() model.TicketLinks {
links := model.TicketLinks{}
links.ProcedureAll = fmt.Sprintf("%s/issues/?jql=labels+=+comply-procedure", j.url)
links.ProcedureOpen = fmt.Sprintf("%s/issues/?jql=labels+=+comply-procedure+AND+resolution+=+Unresolved", j.url)
// links.AuditAll = fmt.Sprintf("%s/issues?q=is%3Aissue+is%3Aopen+label%3Acomply+label%3Aaudit", j.url)
// links.AuditOpen = fmt.Sprintf("%s/issues?q=is%3Aissue+is%3Aopen+label%3Acomply+label%3Aaudit", j.url)
return links
}
func (j *jiraPlugin) Configure(cfg map[string]interface{}) error {
var err error
if j.username, err = getCfg(cfg, cfgUsername); err != nil {
return err
}
if j.password, err = getCfg(cfg, cfgPassword); err != nil {
return err
}
if j.url, err = getCfg(cfg, cfgURL); err != nil {
return err
}
if j.project, err = getCfg(cfg, cfgProject); err != nil {
return err
}
if j.taskType, err = getCfg(cfg, cfgTaskType); err != nil {
return err
}
return nil
}
// getCfg pulls the string value for key k out of the raw plugin
// configuration map. A missing key or a non-string value is an error.
func getCfg(cfg map[string]interface{}, k string) (string, error) {
	value, ok := cfg[k]
	if !ok {
		return "", errors.New("Missing key: " + k)
	}

	str, ok := value.(string)
	if !ok {
		return "", errors.New("Malformatted key: " + k)
	}
	return str, nil
}
func (j *jiraPlugin) FindOpen() ([]*model.Ticket, error) {
panic("not implemented")
}
func (j *jiraPlugin) FindByTag(name, value string) ([]*model.Ticket, error) {
panic("not implemented")
}
// FindByTagName returns all Jira issues carrying the given label.
// Results are capped at 1000 issues by the search options.
func (j *jiraPlugin) FindByTagName(name string) ([]*model.Ticket, error) {
	// previously the JQL hardcoded "labels=comply" and silently ignored
	// the name argument; use the requested label instead
	issues, _, err := j.api().Issue.Search(fmt.Sprintf("labels=%s", name), &jira.SearchOptions{MaxResults: 1000})
	if err != nil {
		return nil, errors.Wrap(err, "unable to fetch Jira issues")
	}

	return toTickets(issues), nil
}
func (j *jiraPlugin) LinkFor(t *model.Ticket) string {
panic("not implemented")
}
// Create files a new Jira issue for ticket in the configured project,
// using the configured task type and applying the given labels so the
// issue can later be found by label search. Returns a wrapped error on
// API failure.
func (j *jiraPlugin) Create(ticket *model.Ticket, labels []string) error {
	i := jira.Issue{
		Fields: &jira.IssueFields{
			Type: jira.IssueType{
				Name: j.taskType,
			},
			Project: jira.Project{
				Key: j.project,
			},
			Summary:     ticket.Name,
			Description: ticket.Body,
			Labels:      labels,
		},
	}
	_, _, err := j.api().Issue.Create(&i)
	if err != nil {
		return errors.Wrap(err, "unable to create ticket")
	}
	return nil
}
// toTickets converts a slice of Jira issues into comply model tickets.
func toTickets(issues []jira.Issue) []*model.Ticket {
	var tickets []*model.Ticket
	// index into the slice rather than taking the address of the loop
	// variable; toTicket reads the issue immediately either way
	for idx := range issues {
		tickets = append(tickets, toTicket(&issues[idx]))
	}
	return tickets
}
// toTicket converts a single Jira issue into a comply model.Ticket. Every
// Jira label is copied onto the ticket as a boolean attribute, and the
// issue's resolution determines the open/closed state.
func toTicket(i *jira.Issue) *model.Ticket {
	t := &model.Ticket{Attributes: make(map[string]interface{})}
	t.ID = i.ID
	t.Name = i.Fields.Summary
	t.Body = i.Fields.Description
	// jira.Time must be converted to time.Time before taking its address
	createdAt := time.Time(i.Fields.Created)
	t.CreatedAt = &createdAt
	t.State = toState(i.Fields.Resolution)

	for _, l := range i.Fields.Labels {
		t.SetBool(l)
	}
	return t
}
// toState maps a Jira resolution onto the comply ticket state. Unresolved
// issues (nil resolution) and any resolution other than "Done" are open.
func toState(status *jira.Resolution) model.TicketState {
	if status != nil && status.Name == "Done" {
		return model.Closed
	}
	return model.Open
}

View File

@@ -0,0 +1,9 @@
package jira
import (
"testing"
)
func TestJira(t *testing.T) {
createOne()
}

View File

@@ -2,7 +2,7 @@ package model
import "time"
type Policy struct {
type Document struct {
Name string `yaml:"name"`
Acronym string `yaml:"acronym"`

View File

@@ -91,8 +91,8 @@ func ReadStandards() ([]*Standard, error) {
}
// ReadNarratives loads narrative descriptions from the filesystem.
func ReadNarratives() ([]*Narrative, error) {
var narratives []*Narrative
func ReadNarratives() ([]*Document, error) {
var narratives []*Document
files, err := path.Narratives()
if err != nil {
@@ -100,7 +100,7 @@ func ReadNarratives() ([]*Narrative, error) {
}
for _, f := range files {
n := &Narrative{}
n := &Document{}
mdmd := loadMDMD(f.FullPath)
err = yaml.Unmarshal([]byte(mdmd.yaml), &n)
if err != nil {
@@ -141,8 +141,8 @@ func ReadProcedures() ([]*Procedure, error) {
}
// ReadPolicies loads policy documents from the filesystem.
func ReadPolicies() ([]*Policy, error) {
var policies []*Policy
func ReadPolicies() ([]*Document, error) {
var policies []*Document
files, err := path.Policies()
if err != nil {
@@ -150,7 +150,7 @@ func ReadPolicies() ([]*Policy, error) {
}
for _, f := range files {
p := &Policy{}
p := &Document{}
mdmd := loadMDMD(f.FullPath)
err = yaml.Unmarshal([]byte(mdmd.yaml), &p)
if err != nil {

View File

@@ -2,8 +2,8 @@ package model
type Data struct {
Standards []*Standard
Narratives []*Narrative
Policies []*Policy
Narratives []*Document
Policies []*Document
Procedures []*Procedure
Tickets []*Ticket
Audits []*Audit

View File

@@ -1,15 +0,0 @@
package model
import "time"
type Narrative struct {
Name string `yaml:"name"`
Acronym string `yaml:"acronym"`
Revisions []Revision `yaml:"majorRevisions"`
Satisfies Satisfaction `yaml:"satisfies"`
FullPath string
OutputFilename string
ModifiedAt time.Time
Body string
}

View File

@@ -17,11 +17,13 @@ type TicketSystem string
const (
// Jira from Atlassian.
Jira = TicketSystem("jira")
Jira = TicketSystem(config.Jira)
// GitHub from GitHub.
GitHub = TicketSystem("github")
GitHub = TicketSystem(config.GitHub)
// GitLab from GitLab.
GitLab = TicketSystem(config.GitLab)
// NoTickets indicates no ticketing system integration.
NoTickets = TicketSystem("none")
NoTickets = TicketSystem(config.NoTickets)
)
type TicketLinks struct {
@@ -50,6 +52,10 @@ func GetPlugin(ts TicketSystem) TicketPlugin {
tsPluginsMu.Lock()
defer tsPluginsMu.Unlock()
if ts == NoTickets {
return &noopTicketSystem{}
}
tp, ok := tsPlugins[ts]
if !ok {
panic("Unknown ticket system: " + ts)
@@ -81,7 +87,10 @@ func GetPlugin(ts TicketSystem) TicketPlugin {
}
cfgStringed[kS] = v
}
tp.Configure(cfgStringed)
err := tp.Configure(cfgStringed)
if err != nil {
panic(fmt.Sprintf("Configuration error `%s` in project YAML", err))
}
}
})
}
@@ -100,3 +109,36 @@ func Register(ts TicketSystem, plugin TicketPlugin) {
tsPlugins[ts] = plugin
}
// noopTicketSystem is the TicketPlugin used when no ticket system is
// configured: every lookup returns empty results and every mutation is a
// silent no-op, so the rest of comply can run without ticketing.
type noopTicketSystem struct{}

func (*noopTicketSystem) Get(ID string) (*Ticket, error) {
	return nil, nil
}
func (*noopTicketSystem) FindOpen() ([]*Ticket, error) {
	return []*Ticket{}, nil
}
func (*noopTicketSystem) FindByTag(name, value string) ([]*Ticket, error) {
	return []*Ticket{}, nil
}
func (*noopTicketSystem) FindByTagName(name string) ([]*Ticket, error) {
	return []*Ticket{}, nil
}
func (*noopTicketSystem) Create(ticket *Ticket, labels []string) error {
	return nil
}
func (*noopTicketSystem) Configure(map[string]interface{}) error {
	return nil
}
func (*noopTicketSystem) Prompts() map[string]string {
	return make(map[string]string)
}
func (*noopTicketSystem) Links() TicketLinks {
	return TicketLinks{}
}
func (*noopTicketSystem) LinkFor(ticket *Ticket) string {
	return ""
}
// Configured reports false so callers never treat the no-op system as a
// live, configured ticketing backend.
func (*noopTicketSystem) Configured() bool {
	return false
}

View File

@@ -3,7 +3,9 @@ package github
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"sync"
"github.com/google/go-github/github"
@@ -69,10 +71,10 @@ func (g *githubPlugin) Configured() bool {
func (g *githubPlugin) Links() model.TicketLinks {
links := model.TicketLinks{}
links.AuditAll = fmt.Sprintf("https://github.com/%s/%s/issues?q=is%3Aissue+is%3Aopen+label%3Acomply+label%3Aaudit", g.username, g.reponame)
links.AuditOpen = fmt.Sprintf("https://github.com/%s/%s/issues?q=is%3Aissue+is%3Aopen+label%3Acomply+label%3Aaudit", g.username, g.reponame)
links.ProcedureAll = fmt.Sprintf("https://github.com/%s/%s/issues?q=is%3Aissue+label%3Acomply+label%3Aprocedure", g.username, g.reponame)
links.ProcedureOpen = fmt.Sprintf("https://github.com/%s/%s/issues?q=is%3Aissue+is%3Aopen+label%3Acomply+label%3Aprocedure", g.username, g.reponame)
links.AuditAll = fmt.Sprintf("https://github.com/%s/%s/issues?q=is%%3Aissue+is%%3Aopen+label%%3Acomply+label%%3Aaudit", g.username, g.reponame)
links.AuditOpen = fmt.Sprintf("https://github.com/%s/%s/issues?q=is%%3Aissue+is%%3Aopen+label%%3Acomply+label%%3Aaudit", g.username, g.reponame)
links.ProcedureAll = fmt.Sprintf("https://github.com/%s/%s/issues?q=is%%3Aissue+label%%3Acomply+label%%3Acomply-procedure", g.username, g.reponame)
links.ProcedureOpen = fmt.Sprintf("https://github.com/%s/%s/issues?q=is%%3Aissue+is%%3Aopen+label%%3Acomply+label%%3Acomply-procedure", g.username, g.reponame)
return links
}
@@ -95,7 +97,10 @@ func (g *githubPlugin) Configure(cfg map[string]interface{}) error {
func getCfg(cfg map[string]interface{}, k string) (string, error) {
v, ok := cfg[k]
if !ok {
return "", errors.New("Missing key: " + k)
v = os.Getenv(fmt.Sprintf("GITHUB_%s", strings.ToUpper(k)))
if v == "" {
return "", errors.New("Missing key: " + k)
}
}
vS, ok := v.(string)
@@ -135,7 +140,8 @@ func (g *githubPlugin) FindByTagName(name string) ([]*model.Ticket, error) {
}
func (g *githubPlugin) LinkFor(t *model.Ticket) string {
return fmt.Sprintf("https://github.com/strongdm/comply/issues/%s", t.ID)
// return fmt.Sprintf("https://github.com/strongdm/comply/issues/%s", t.ID)
panic("not implemented")
}
func (g *githubPlugin) Create(ticket *model.Ticket, labels []string) error {

View File

@@ -5,6 +5,7 @@ import (
"sort"
"time"
"github.com/pkg/errors"
"github.com/strongdm/comply/internal/config"
"github.com/strongdm/comply/internal/model"
)
@@ -32,8 +33,8 @@ type renderData struct {
Name string
Project *project
Stats *stats
Narratives []*model.Narrative
Policies []*model.Policy
Narratives []*model.Document
Policies []*model.Document
Procedures []*model.Procedure
Standards []*model.Standard
Tickets []*model.Ticket
@@ -93,8 +94,12 @@ func load() (*model.Data, *renderData, error) {
rd.Name = project.OrganizationName
rd.Controls = controls
// TODO: unhardcode plugin
tp := model.GetPlugin(model.GitHub)
ts, err := config.Config().TicketSystem()
if err != nil {
return nil, nil, errors.Wrap(err, "error in ticket system configuration")
}
tp := model.GetPlugin(model.TicketSystem(ts))
if tp.Configured() {
links := tp.Links()
rd.Links = &links
@@ -133,7 +138,7 @@ func addStats(modelData *model.Data, renderData *renderData) {
}
if t.State == model.Open {
if t.Bool("procedure") {
if t.Bool("comply-procedure") {
stats.ProcedureOpen++
if t.CreatedAt != nil {
age := int(time.Since(*t.CreatedAt).Hours() / float64(24))

View File

@@ -2,7 +2,6 @@ package render
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
@@ -12,89 +11,34 @@ import (
"text/template"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"os/exec"
"github.com/pkg/errors"
"github.com/strongdm/comply/internal/config"
"github.com/strongdm/comply/internal/model"
)
// TODO: refactor and eliminate duplication among narrative, policy renderers
func renderPolicyToDisk(wg *sync.WaitGroup, errOutputCh chan error, data *renderData, policy *model.Policy, live bool) {
func renderToFilesystem(wg *sync.WaitGroup, errOutputCh chan error, data *renderData, doc *model.Document, live bool) {
// only files that have been touched
if !isNewer(policy.FullPath, policy.ModifiedAt) {
if !isNewer(doc.FullPath, doc.ModifiedAt) {
return
}
recordModified(policy.FullPath, policy.ModifiedAt)
ctx := context.Background()
cli, err := client.NewEnvClient()
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to read Docker environment")
return
}
pwd, err := os.Getwd()
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to get workding directory")
return
}
hc := &container.HostConfig{
Binds: []string{pwd + ":/source"},
}
recordModified(doc.FullPath, doc.ModifiedAt)
wg.Add(1)
go func(p *model.Policy) {
go func(p *model.Document) {
defer wg.Done()
outputFilename := p.OutputFilename
// save preprocessed markdown
err = preprocessPolicy(data, p, filepath.Join(".", "output", outputFilename+".md"))
err := preprocessDoc(data, p, filepath.Join(".", "output", outputFilename+".md"))
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to preprocess")
return
}
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: "strongdm/pandoc",
Cmd: []string{"--smart", "--toc", "-N", "--template=/source/templates/default.latex", "-o",
fmt.Sprintf("/source/output/%s", outputFilename),
fmt.Sprintf("/source/output/%s.md", outputFilename),
},
}, hc, nil, "")
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to create Docker container")
return
}
defer func() {
timeout := 2 * time.Second
cli.ContainerStop(ctx, resp.ID, &timeout)
err := cli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to remove container")
return
}
}()
if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
errOutputCh <- errors.Wrap(err, "unable to start Docker container")
return
}
_, err = cli.ContainerWait(ctx, resp.ID)
if err != nil {
errOutputCh <- errors.Wrap(err, "error awaiting Docker container")
return
}
_, err = cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true})
if err != nil {
errOutputCh <- errors.Wrap(err, "error reading Docker container logs")
return
}
pandoc(outputFilename, errOutputCh)
// remove preprocessed markdown
err = os.Remove(filepath.Join(".", "output", outputFilename+".md"))
@@ -108,10 +52,44 @@ func renderPolicyToDisk(wg *sync.WaitGroup, errOutputCh chan error, data *render
rel = p.FullPath
}
fmt.Printf("%s -> %s\n", rel, filepath.Join("output", p.OutputFilename))
}(policy)
}(doc)
}
func preprocessPolicy(data *renderData, pol *model.Policy, fullPath string) error {
func getGitApprovalInfo(pol *model.Document) (string, error) {
cfg := config.Config()
// if no approved branch specified in config.yaml, then nothing gets added to the document
if cfg.ApprovedBranch == "" {
return "", nil
}
// Decide whether we are on the git branch that contains the approved policies
gitBranchArgs := []string{"rev-parse", "--abbrev-ref", "HEAD"}
gitBranchCmd := exec.Command("git", gitBranchArgs...)
gitBranchInfo, err := gitBranchCmd.CombinedOutput()
if err != nil {
fmt.Println(string(gitBranchInfo))
return "", errors.Wrap(err, "error looking up git branch")
}
// if on a different branch than the approved branch, then nothing gets added to the document
if strings.Compare(strings.TrimSpace(fmt.Sprintf("%s", gitBranchInfo)), cfg.ApprovedBranch) != 0 {
return "", nil
}
// Grab information related to commit, so that we can put approval information in the document
gitArgs := []string{"log", "-n", "1", "--pretty=format:Last edit made by %an (%aE) on %aD.\n\nApproved by %cn (%cE) on %cD in commit %H.", "--", pol.FullPath}
cmd := exec.Command("git", gitArgs...)
gitApprovalInfo, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(gitApprovalInfo))
return "", errors.Wrap(err, "error looking up git committer and author data")
}
return fmt.Sprintf("%s\n%s", "# Authorship and Approval", gitApprovalInfo), nil
}
func preprocessDoc(data *renderData, pol *model.Document, fullPath string) error {
cfg := config.Config()
var w bytes.Buffer
@@ -147,6 +125,11 @@ func preprocessPolicy(data *renderData, pol *model.Policy, fullPath string) erro
revisionTable = fmt.Sprintf("|Date|Comment|\n|---+--------------------------------------------|\n%s\nTable: Document history\n", rows)
}
gitApprovalInfo, err := getGitApprovalInfo(pol)
if err != nil {
return err
}
doc := fmt.Sprintf(`%% %s
%% %s
%% %s
@@ -162,6 +145,8 @@ foot-content: "%s confidential %d"
%s
\newpage
%s
%s`,
pol.Name,
cfg.Name,
@@ -172,6 +157,7 @@ foot-content: "%s confidential %d"
satisfiesTable,
revisionTable,
body,
gitApprovalInfo,
)
err = ioutil.WriteFile(fullPath, []byte(doc), os.FileMode(0644))
if err != nil {

View File

@@ -15,7 +15,10 @@ import (
const websocketReloader = `<script>
(function(){
var ws = new WebSocket("ws://localhost:5122/ws")
var ws = new WebSocket("ws://localhost:%d/ws")
if (location.host != "") {
ws = new WebSocket("ws://"+location.host+"/ws")
}
var connected = false
ws.onopen = function(e) {
connected = true
@@ -74,7 +77,7 @@ func html(output string, live bool, errCh chan error, wg *sync.WaitGroup) {
}
if live {
w.Write([]byte(websocketReloader))
w.Write([]byte(fmt.Sprintf(websocketReloader, ServePort)))
}
w.Close()
}
@@ -82,7 +85,7 @@ func html(output string, live bool, errCh chan error, wg *sync.WaitGroup) {
if live {
if !opened {
opened = true
open.Run("output/index.html")
open.Run(fmt.Sprintf("http://127.0.0.1:%d/", ServePort))
}
} else {
wg.Done()

View File

@@ -1,184 +0,0 @@
package render
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"text/template"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/strongdm/comply/internal/config"
"github.com/strongdm/comply/internal/model"
)
// TODO: refactor and eliminate duplication among narrative, policy renderers
func renderNarrativeToDisk(wg *sync.WaitGroup, errOutputCh chan error, data *renderData, narrative *model.Narrative, live bool) {
// only files that have been touched
if !isNewer(narrative.FullPath, narrative.ModifiedAt) {
return
}
recordModified(narrative.FullPath, narrative.ModifiedAt)
ctx := context.Background()
cli, err := client.NewEnvClient()
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to read Docker environment")
return
}
pwd, err := os.Getwd()
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to get workding directory")
return
}
hc := &container.HostConfig{
Binds: []string{pwd + ":/source"},
}
wg.Add(1)
go func(p *model.Narrative) {
defer wg.Done()
outputFilename := p.OutputFilename
// save preprocessed markdown
err = preprocessNarrative(data, p, filepath.Join(".", "output", outputFilename+".md"))
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to preprocess")
return
}
cmd := []string{"--smart", "--toc", "-N", "--template=/source/templates/default.latex", "-o",
fmt.Sprintf("/source/output/%s", outputFilename),
fmt.Sprintf("/source/output/%s.md", outputFilename)}
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: "strongdm/pandoc",
Cmd: cmd},
hc, nil, "")
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to create Docker container")
return
}
defer func() {
timeout := 2 * time.Second
cli.ContainerStop(ctx, resp.ID, &timeout)
err := cli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to remove container")
return
}
}()
if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
errOutputCh <- errors.Wrap(err, "unable to start Docker container")
return
}
_, err = cli.ContainerWait(ctx, resp.ID)
if err != nil {
errOutputCh <- errors.Wrap(err, "error awaiting Docker container")
return
}
_, err = cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true})
if err != nil {
errOutputCh <- errors.Wrap(err, "error reading Docker container logs")
return
}
// remove preprocessed markdown
err = os.Remove(filepath.Join(".", "output", outputFilename+".md"))
if err != nil {
errOutputCh <- err
return
}
rel, err := filepath.Rel(config.ProjectRoot(), p.FullPath)
if err != nil {
rel = p.FullPath
}
fmt.Printf("%s -> %s\n", rel, filepath.Join("output", p.OutputFilename))
}(narrative)
}
func preprocessNarrative(data *renderData, pol *model.Narrative, fullPath string) error {
cfg := config.Config()
var w bytes.Buffer
bodyTemplate, err := template.New("body").Parse(pol.Body)
if err != nil {
w.WriteString(fmt.Sprintf("# Error processing template:\n\n%s\n", err.Error()))
} else {
bodyTemplate.Execute(&w, data)
}
body := w.String()
revisionTable := ""
satisfiesTable := ""
// ||Date|Comment|
// |---+------|
// | 4 Jan 2018 | Initial Version |
// Table: Document history
if len(pol.Satisfies) > 0 {
rows := ""
for standard, keys := range pol.Satisfies {
rows += fmt.Sprintf("| %s | %s |\n", standard, strings.Join(keys, ", "))
}
satisfiesTable = fmt.Sprintf("|Standard|Controls Satisfied|\n|-------+--------------------------------------------|\n%s\nTable: Control satisfaction\n", rows)
}
if len(pol.Revisions) > 0 {
rows := ""
for _, rev := range pol.Revisions {
rows += fmt.Sprintf("| %s | %s |\n", rev.Date, rev.Comment)
}
revisionTable = fmt.Sprintf("|Date|Comment|\n|---+--------------------------------------------|\n%s\nTable: Document history\n", rows)
}
doc := fmt.Sprintf(`%% %s
%% %s
%% %s
---
header-includes: yes
head-content: "%s"
foot-content: "%s confidential %d"
---
%s
%s
\newpage
%s`,
pol.Name,
cfg.Name,
fmt.Sprintf("%s %d", pol.ModifiedAt.Month().String(), pol.ModifiedAt.Year()),
pol.Name,
cfg.Name,
time.Now().Year(),
satisfiesTable,
revisionTable,
body,
)
err = ioutil.WriteFile(fullPath, []byte(doc), os.FileMode(0644))
if err != nil {
return errors.Wrap(err, "unable to write preprocessed narrative to disk")
}
return nil
}

101
internal/render/pandoc.go Normal file
View File

@@ -0,0 +1,101 @@
package render
import (
"context"
"fmt"
"os"
"os/exec"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/strongdm/comply/internal/config"
)
var pandocArgs = []string{"-f", "markdown+smart", "--toc", "-N", "--template", "templates/default.latex", "-o"}
func pandoc(outputFilename string, errOutputCh chan error) {
if config.WhichPandoc() == config.UsePandoc {
err := pandocPandoc(outputFilename)
if err != nil {
errOutputCh <- err
}
} else {
dockerPandoc(outputFilename, errOutputCh)
}
}
func dockerPandoc(outputFilename string, errOutputCh chan error) {
pandocCmd := append(pandocArgs, fmt.Sprintf("/source/output/%s", outputFilename), fmt.Sprintf("/source/output/%s.md", outputFilename))
ctx := context.Background()
cli, err := client.NewEnvClient()
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to read Docker environment")
return
}
pwd, err := os.Getwd()
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to get workding directory")
return
}
hc := &container.HostConfig{
Binds: []string{pwd + ":/source"},
}
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: "strongdm/pandoc",
Cmd: pandocCmd},
hc, nil, "")
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to create Docker container")
return
}
defer func() {
timeout := 2 * time.Second
cli.ContainerStop(ctx, resp.ID, &timeout)
err := cli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
if err != nil {
errOutputCh <- errors.Wrap(err, "unable to remove container")
return
}
}()
if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
errOutputCh <- errors.Wrap(err, "unable to start Docker container")
return
}
_, err = cli.ContainerWait(ctx, resp.ID)
if err != nil {
errOutputCh <- errors.Wrap(err, "error awaiting Docker container")
return
}
_, err = cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true})
if err != nil {
errOutputCh <- errors.Wrap(err, "error reading Docker container logs")
return
}
if _, err = os.Stat(fmt.Sprintf("output/%s", outputFilename)); err != nil && os.IsNotExist(err) {
errOutputCh <- errors.Wrap(err, "output not generated; verify your Docker image is up to date")
return
}
}
// 🐼
func pandocPandoc(outputFilename string) error {
cmd := exec.Command("pandoc", append(pandocArgs, fmt.Sprintf("output/%s", outputFilename), fmt.Sprintf("output/%s.md", outputFilename))...)
outputRaw, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(outputRaw))
return errors.Wrap(err, "error calling pandoc")
}
return nil
}

View File

@@ -25,7 +25,7 @@ func pdf(output string, live bool, errCh chan error, wg *sync.WaitGroup) {
return
}
for _, policy := range policies {
renderPolicyToDisk(&pdfWG, errOutputCh, data, policy, live)
renderToFilesystem(&pdfWG, errOutputCh, data, policy, live)
}
narratives, err := model.ReadNarratives()
@@ -35,7 +35,7 @@ func pdf(output string, live bool, errCh chan error, wg *sync.WaitGroup) {
}
for _, narrative := range narratives {
renderNarrativeToDisk(&pdfWG, errOutputCh, data, narrative, live)
renderToFilesystem(&pdfWG, errOutputCh, data, narrative, live)
}
pdfWG.Wait()

View File

@@ -1,8 +1,10 @@
package render
import (
"fmt"
"net/http"
"os"
"path/filepath"
"sync"
"time"
@@ -11,6 +13,8 @@ import (
"github.com/yosssi/ace"
)
var ServePort int
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
@@ -87,6 +91,16 @@ func Build(output string, live bool) error {
if live {
watch(errCh)
go func() {
http.Handle("/", http.FileServer(http.Dir(filepath.Join(".", "output"))))
err := http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", ServePort), nil)
if err != nil {
panic(err)
}
}()
fmt.Printf("Serving content of output/ at http://127.0.0.1:%d (ctrl-c to quit)\n", ServePort)
}
// PDF
wg.Add(1)

View File

@@ -45,7 +45,6 @@ func watch(errCh chan error) {
}
http.HandleFunc("/ws", serveWs)
go http.ListenAndServe("127.0.0.1:5122", nil)
return
}

View File

@@ -137,7 +137,7 @@ func complyBlankReadmeMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-blank/README.md", size: 1965, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-blank/README.md", size: 1965, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -157,7 +157,7 @@ func complyBlankTodoMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-blank/TODO.md", size: 1429, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-blank/TODO.md", size: 1429, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -277,7 +277,7 @@ func complyBlankTemplatesDefaultLatex() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-blank/templates/default.latex", size: 7649, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-blank/templates/default.latex", size: 7649, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -297,7 +297,7 @@ func complyBlankTemplatesIndexAce() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-blank/templates/index.ace", size: 7596, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-blank/templates/index.ace", size: 7596, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -317,7 +317,7 @@ func complySoc2ReadmeMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/README.md", size: 1965, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/README.md", size: 1965, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -337,7 +337,7 @@ func complySoc2TodoMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/TODO.md", size: 1429, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/TODO.md", size: 1429, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -357,12 +357,12 @@ func complySoc2NarrativesReadmeMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/narratives/README.md", size: 96, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/narratives/README.md", size: 96, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2NarrativesControlMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x90\x31\x4f\xc3\x30\x10\x85\x77\xff\x8a\x27\x31\x07\x91\xb4\x95\x50\x56\xab\x12\x20\xe8\x40\xbb\x30\x9a\xe4\xa0\x47\xe3\x3b\x64\x3b\xa9\xf2\xef\x91\x23\x14\x99\x89\xc9\x4f\x9f\xdf\xb3\xdf\x9d\x38\x4f\x2d\xac\x4a\x0a\x3a\x60\x2f\x13\x07\x15\x4f\x92\x70\x70\x21\xb8\xc4\x13\x19\xd7\x05\x95\xd9\xb7\xb0\xfb\x83\x89\x2e\x71\xfc\x60\x8a\xad\x01\x4e\x47\x9b\x0f\xa0\x82\xb5\xcd\x6d\x5d\xe8\xa6\xd0\x9b\x55\x6f\x0b\xcf\xb6\xf0\xec\x0a\xbe\xfb\xc3\x37\xc6\xbb\x2f\x0d\xaf\x34\x71\x64\x95\xe5\xdb\x0a\xbd\x4b\xd4\xe2\x69\x14\xd4\x68\xee\xea\xfb\x25\xd0\xa9\xcf\xcd\x5b\x3c\x0a\x27\x76\x03\x7a\xed\xc6\x4c\x4c\x55\x55\xc6\xdc\xfc\x33\xa6\x79\xa0\x40\xb8\x12\x64\x41\x84\xeb\x79\x86\x8e\x01\xdd\x6f\x8c\x8a\xd8\xba\x06\xa4\x33\xad\x8e\x0b\xcd\x11\x03\xc7\x44\x3d\x58\x96\xab\xb7\x97\x67\xbc\x0f\xda\x5d\x72\x81\x13\xf9\xef\x21\x3f\x6d\xd5\xb3\x7c\xe2\xa8\x2a\xe6\x27\x00\x00\xff\xff\x49\x83\xc0\x0b\x83\x01\x00\x00")
var _complySoc2NarrativesControlMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x56\x4d\x6f\x1b\xc9\x11\xbd\xcf\xaf\x28\x40\xc0\x26\x01\x44\x26\xf6\xee\x02\x81\x6e\x0a\xa5\x04\x0e\xbc\x96\x60\x09\xde\x83\x91\x43\xb1\xa7\x86\xac\xa8\xa7\x6b\xb6\xab\x9b\xf2\x64\xe1\xff\x1e\x54\xf7\x70\x38\xb4\x2d\x38\x97\x9c\x38\xec\x9e\xa9\x8f\xf7\x5e\x7d\x04\xec\xe9\x0a\x36\x12\x52\x14\x0f\xb7\xe1\xc0\x51\x42\x4f\x21\xc1\x3b\x8c\x11\x13\x1f\xa8\x41\x17\x25\x8c\xfd\x15\x6c\x6e\xdf\x35\x8a\x89\xb5\x63\xd2\xab\x06\xe0\xf1\x61\x63\x3f\x00\x2b\xd8\x6c\x5e\xaf\x5f\x2d\x9e\x5f\x2f\x9e\x7f\x9c\x9f\x7f\x5a\xbc\xf3\xd3\xe2\x9d\x9f\x17\xe7\x3f\x9f\x9d\xff\xd8\xf4\xf8\x6f\x89\xef\xe9\xc0\xca\x12\x8a\xdb\x15\xb4\x98\xe8\x0a\xfe\x99\x03\xbc\x82\xd7\x7f\x79\xf5\xd7\xf2\x81\x93\xde\x22\xbf\x82\x37\x81\x13\xa3\x87\x56\x5c\xb6\x93\x66\xb5\x5a\x35\xcd\xc5\x77\xd2\x6c\x1e\xf7\x04\x9d\x78\x2f\xcf\x1c\x76\x30\x44\x39\x70\x4b\x0a\x08\x2d\xa9\x8b\x3c\x24\x96\x00\xd2\x41\xda\x13\xb8\xc9\x94\xa6\x98\x5d\xca\x91\xec\xe2\xf7\xdf\xd7\xef\xb0\xa7\xcf\x9f\xd7\xd5\x18\x87\x64\x2e\xca\x27\xac\x67\x66\x58\x21\x09\x50\xc8\x3d\x45\x4c\x54\x6c\x7a\xd9\xb1\x43\x7f\x09\x83\x78\x76\xe3\x25\x60\x68\x2d\x0c\x47\x6d\x8e\xe8\x8f\x3e\x15\xd2\x1e\x13\x28\xc5\x03\x99\x91\x5e\x02\x27\x89\x27\xef\x7f\x50\xc0\x61\xf0\xec\xb0\xb8\x32\x2b\x2d\x26\x04\x25\x97\x23\xa7\x71\x0d\x9b\x3d\x86\x1d\x29\xe4\xe0\xe4\x40\x91\x5a\xd8\x8e\x16\x82\xd2\xec\x8f\x14\x38\x7c\x3b\xac\x53\x48\x97\x20\x11\x5c\xd6\x24\x3d\x45\xa0\x05\xac\x18\x09\xb0\x6d\x23\xa9\x56\xeb\x91\x7a\x6a\xb9\x44\xa4\xa0\x03\x39\xee\xd8\x59\xf8\xe6\x22\x48\xa2\x16\x5c\x89\x6a\x6d\x4c\xbd\xad\x3e\x8f\x8c\x69\xd3\xcc\xd9\x01\xf5\x83\x97\x51\x41\xe9\x40\x06\xcb\x14\xdf\x02\x1e\xb1\x10\x13\xb9\x64\x67\x1d\xb7\x14\xaa\x1c\x0c\x04\x43\x83\x82\x1a\x63\x41\x62\x8f\x1e\x64\x30\x0a\x26\x6e\x39\x29\x38\x89\x05\x87\x36\xbb\xb4\x6e\x9a\x15\xfc\x82\xa1\xc5\x24\x71\xac\x26\x28\xb8\x38\x56\x1a\x31\x41\x24\x4d\xc5\x2a\x07\xe8\xc5\x4e\xed\x8b\xec\x13\xaf\x3a\x74\x46\x0c\xe6\xb4\xb7\x10\x2
6\x3e\x3a\x3b\x72\x8e\xb4\x44\xea\xbc\x64\xfb\xb6\x8b\x38\x6b\xa9\x59\xc1\xb5\x4b\x7c\xe0\x34\x16\xcb\x18\xa4\x47\x3f\x1e\x99\x36\x75\x4a\x38\x46\x68\x26\x75\xd4\x44\xbd\x36\x2b\xf8\x90\x7d\xa0\x88\x5b\xf6\xf6\x71\x8f\x01\x77\x54\x08\x19\xa2\xec\x22\xf6\x06\xee\x7d\xe1\xf1\x7f\xc2\xb6\x52\xfe\x7f\x82\x16\x1e\xab\xe2\xcc\x07\x17\xbd\x39\x9f\x5b\xba\x84\x6d\xae\x02\x0a\x92\xc0\x73\xcf\x26\x8e\x24\x57\x4d\xc1\xa5\x00\x77\xac\xe4\x9a\x4a\xb3\x82\xdb\x13\x27\xf3\xd9\x5d\xd7\xb1\x23\x78\x98\x64\x7f\xba\xb8\x47\xd5\x67\x89\xed\xe2\xa4\xe6\xf9\x18\x91\x83\xc1\x3b\x5f\x7c\xa0\xd0\x4a\x3c\xfd\xff\x55\xe2\x93\x26\x5c\x3a\x32\x44\x4f\x45\xfa\x2d\x54\xf7\xa8\x50\x2a\x5d\xb2\x82\xba\x3d\xb5\xd9\x53\xbb\x2c\xb5\x45\x19\x1b\x8a\x29\x87\xda\x12\xa8\xeb\xc8\x94\x40\xc1\xb2\x96\x0e\x24\xec\xc4\x02\x3c\xd6\xf2\x4c\x4d\xed\x16\x56\xe4\xd1\xb0\x94\x0e\xe8\x40\x21\xad\xda\x68\x5f\x7f\xe1\x2b\x92\x0e\x62\x7e\x64\x36\xb4\x8a\xe4\xd1\x80\x2e\x9f\xa9\x75\xb0\xbb\x9b\xbb\x2b\xf8\x3b\x07\xf4\xfc\x1f\x9a\xda\x83\x67\x4d\xda\x34\x17\x17\xf0\x30\xe7\x31\x03\x6c\x11\x5c\xe7\x96\xd3\x0c\x08\xa9\x91\x66\xcd\x9b\x9e\x8f\xdc\x7d\xfc\x2d\x63\x4c\x14\xfd\xf8\xaf\xd3\xdd\x6c\xe3\xad\xec\x14\x3e\x3e\x13\x3d\x9d\xdd\x6f\xc6\x2d\x45\x78\xcf\xfa\x04\xd7\xaa\xa4\x5a\x44\xfd\xc7\x53\x03\x1d\x44\x95\xb7\xde\x3a\x73\x3f\x44\xe9\x59\x09\xd4\x51\xc0\xc8\xa2\x7f\xfa\xb6\xd3\x1b\x53\xed\xc6\xa3\xaa\x35\xa3\x4a\xea\xf9\x8b\x7f\x43\xf7\x94\x07\x78\x24\x4d\x86\xfa\xf9\xe5\x0d\x2b\x6a\xb2\xb0\xa8\xb4\xd1\xf1\xf4\x9e\x52\xcf\x2b\x0c\x21\xa3\x5f\xfa\xa3\x03\x3b\x52\xf8\x61\x29\xa3\x17\x00\xf9\x01\x36\x9e\x30\xc2\x5b\x79\x5e\xdd\x47\x96\x02\xce\xb5\xa7\x98\xce\xe0\xb9\x1e\x06\x3f\xc2\xdd\x03\xdc\x63\x72\x7b\x52\xf8\xd8\x4b\x48\xfb\x7a\xf9\x81\x22\x77\x63\x4d\xf3\x86\x75\x10\xb5\x92\x2e\x01\xdb\x60\x3a\x69\xf8\x8b\x10\x36\x12\xac\x40\x4f\xa4\xcc\x75\xf1\xf1\xab\x9c\xe6\x77\x7e\x39\x35\xa7\xa2\x03\x0b\xd5\xfe\x6c\xac\x4f\xec\x72\xfc\x16\xbc\xf7\x14\x28\x4d\x37\x86\xdd\xd2\xfe\xaf\x7b\x4e\xb4\x95\x4f\x27\x0f\x93\xc7\xc5\x3b\x0
f\x77\x9b\xd7\x93\xe2\xe6\x53\xd3\xe6\x6d\x91\xfe\x4d\x95\xfe\x77\xe5\x79\x17\xb6\x82\xb1\x85\xdb\xd2\xfc\x88\x6a\xeb\xf8\xea\xec\x4d\x38\x18\xbd\x3b\x53\xdb\x6c\xb3\xa4\xf9\xd2\xe5\x9b\xe0\x4a\x87\xb4\x16\xf1\x7e\x31\xff\x96\xbd\x21\xab\x95\xe4\x9e\x40\x72\x72\xd2\xd7\xd2\xb5\xff\xd8\x49\x2c\xad\x9b\x25\xd8\x70\x3c\xb6\xe0\xe5\x52\x50\xab\xb9\x76\xe1\x6e\x04\xdd\x4b\x34\x23\x1c\x76\xf3\xf4\xa6\x4f\x5c\x35\x79\xdc\x58\x16\x53\x7a\x0d\x77\xc1\xd1\xf1\x7b\xa6\xf6\x72\xaa\xf2\xa3\x21\xdb\x7e\x22\xcd\xb3\xbb\x4e\x72\xee\xcb\x6a\x14\x76\x5f\xd9\xfe\x32\xb8\xda\x93\x5c\x24\x2c\x6f\x05\x7a\x7e\x31\x0d\xeb\x90\x44\x2d\xb5\xeb\xba\xa8\xf5\x7d\x0e\x53\x49\x9e\xc1\xe5\xe6\x1b\x52\x88\xe4\xe9\x80\x21\xd9\xf8\xb4\x81\x53\x74\x14\x69\x87\xb1\x35\x7f\x96\x7e\x97\x43\x19\x91\x65\x66\x4e\xc0\x6e\xe5\x40\xa7\x40\x9e\x39\xed\xcb\xa6\x16\x03\xfa\x3a\xc3\x3e\x4d\x7f\x06\x8c\xa9\x34\x53\x5b\xa2\x00\x75\x55\x43\x84\x2d\x2a\xd7\x0c\xd0\x39\x99\x9c\x09\x58\x41\xe7\xb2\x26\x44\xfa\x2d\x73\x65\xcf\x3a\xe9\xc5\x05\xbc\x99\x1c\xbc\x98\xcb\x91\x9e\xa3\x0c\x2e\xa7\xa9\xcf\x47\x18\xcf\x16\xa8\x63\xbc\x7e\x84\xac\x73\xae\xf3\xee\x6a\x9b\x54\x20\xaf\x65\x64\x3e\x78\x74\x4f\x36\x22\x7b\x64\xdf\xac\xe0\x1f\x9c\xf6\x79\x0b\x89\xdd\x13\x19\x2f\xb5\x62\x3e\x7d\x27\xbe\x19\xeb\x29\xd0\x79\x5e\x2c\xb1\xb7\x95\xf6\x4b\xf0\xea\x54\x2f\x33\x6b\x8f\x91\xf6\xe2\x5b\x8a\x7a\x39\xaf\x8d\xf6\x68\x26\xcb\xae\xa4\x97\xc6\x5f\xf6\x58\x9f\x2d\xef\x9d\xb5\xd6\xba\x54\x9a\x4c\x8b\xcd\x59\x2d\xe7\x04\x1c\xed\xe4\x89\xc8\xd9\xd4\xf8\xe7\x13\x37\xb2\xf5\x56\xa6\x2c\x61\xfd\xdf\x00\x00\x00\xff\xff\xd5\x1b\xb6\x85\xfd\x0c\x00\x00")
func complySoc2NarrativesControlMdBytes() ([]byte, error) {
return bindataRead(
@@ -377,7 +377,7 @@ func complySoc2NarrativesControlMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/narratives/control.md", size: 387, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/narratives/control.md", size: 3325, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -397,12 +397,12 @@ func complySoc2NarrativesOrganizationalMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/narratives/organizational.md", size: 2378, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/narratives/organizational.md", size: 2378, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2NarrativesProductsMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x44\xcd\xb1\x4e\x03\x31\x10\x84\xe1\xde\x4f\x31\x12\xf5\x49\x84\x0a\xb9\xa5\x01\x8a\x28\xe2\x78\x81\x8d\x3d\x0a\x4b\xe2\xdd\x68\xed\x3b\x74\x3c\x3d\x4a\x81\xd2\x8e\x46\xff\x67\xd2\x98\x71\x08\xaf\x4b\x19\x1d\x62\x15\x33\x63\xd5\xc2\x8e\xbd\x44\xc8\xd0\x95\x49\x4a\xb8\x6d\x2d\xe3\x30\xef\x53\x93\x6f\x8f\x0f\xae\xda\xd5\xad\xe7\x04\x4c\xa8\x32\x98\xf1\xbe\x18\x76\x78\x7a\xdc\x3d\x27\x00\x28\xde\x1a\x6d\x64\xbc\x99\x0e\x95\x0b\xaa\x97\xe5\xb6\xa4\x69\x9a\x52\x7a\xb8\xb3\x77\x29\xbd\x32\x88\x1f\xa2\xb2\x97\xd0\x23\x31\xbe\x88\x33\x37\x5c\xff\xcf\x4d\xe2\xcc\xc1\x8a\xe3\x06\x5f\x02\x1e\x27\x31\xfd\x95\xa1\x6e\xb7\xea\x27\xdb\xf5\x22\x83\x78\xf1\xa6\x76\xc2\xec\x6e\xe9\x2f\x00\x00\xff\xff\x2c\x6f\xb1\xc2\xe8\x00\x00\x00")
var _complySoc2NarrativesProductsMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x52\xcb\x6e\xdb\x40\x10\xbb\xef\x57\x10\xf0\x35\x0e\x9a\x9e\x0a\xdf\xd2\xf6\xd0\x14\x69\x62\xd8\x5f\xb0\x5e\xd1\xf2\xd4\xd2\xae\x30\xb3\xb2\xa1\x7e\x7d\x21\xd5\x7a\x24\xcd\x51\x1c\x8a\x1c\x72\x27\xfa\x9a\x1b\x6c\x35\x15\x6d\xc8\x06\x1f\x0b\xec\xa9\x17\x09\x34\xbc\x78\x55\x9f\xe5\x42\xe7\x83\xa6\xd8\xd5\x1b\x6c\xf7\x2f\xae\xf6\xbf\x93\xee\x78\x11\x93\x14\x6d\xe3\x80\x35\x0a\x9f\xb9\xc1\xcf\x36\xe2\x01\x9f\x3f\x3d\x7c\x71\x00\x10\x52\x5d\x33\xe6\x0d\x9e\xa2\x64\xf1\x15\x8a\x14\xda\x1e\x71\xeb\xf5\xda\xb9\xd5\x6c\x3b\x3b\xb9\x1f\x54\xe2\x4a\x14\xb4\xa0\x72\x20\xf2\x89\x38\xb3\x43\x33\x92\x6b\xaf\x67\x66\x16\x38\x74\x48\xad\x22\x69\xe9\xa3\xfc\xf1\x59\x52\x5c\xaa\x3a\xb7\x9a\x3e\xf0\xe0\xdc\xeb\xa5\x0f\xc6\x2b\xd2\x71\x14\xeb\xe1\xd5\x6a\x85\x47\x0d\x27\xc9\x0c\xb9\x55\x3a\xf7\x55\x85\x47\xf8\x19\xeb\x57\x17\x0b\xad\xf5\x89\x3f\xf8\x7d\xcf\xd0\xaa\xe4\x0e\xdf\x52\x34\x29\xa8\xc3\x2e\xe6\xdc\xbe\x61\x90\xa3\x04\xd8\xc8\x08\x6f\x18\x38\x26\x9d\xc5\xee\xb1\xe3\x91\x8a\x9c\xd0\xa4\x4a\x82\xd0\xee\xfa\x69\x60\xd1\x2a\x0d\x27\x2a\xef\xfb\x84\x03\x8d\x31\xf0\x5f\xc6\xa9\x3d\x73\xee\x59\x2c\x43\x59\xf1\xe2\x63\x46\x9c\x26\x83\xd0\xc1\x1f\xaa\x0e\x12\x43\xd5\x16\x12\x4b\xf7\xba\x68\xce\x57\x8b\x57\x98\x02\x2d\xa0\xce\x32\xeb\xe5\x4b\xf5\xed\xde\xd6\x7c\xef\xfb\x66\xfd\xf7\xae\x8f\x4d\x53\x49\x18\x4c\xe7\xe6\x06\xa5\xce\x7d\xf7\xd9\x07\xc6\x4c\x1d\x91\xe7\x54\xe2\x97\x8f\xbe\x64\x7f\x39\x23\xba\xf5\x66\xd7\xa4\xc5\xf8\x3d\xe9\x3c\xc5\x20\x45\x4f\xdc\xd1\x9a\x14\x8d\x23\x63\x27\x76\xc6\xa3\x19\xcd\x96\x42\xb7\x1b\xb9\x55\xfc\x5f\x8e\x69\xf2\x51\x12\xf8\x10\x68\x06\x65\x7f\x56\x77\x68\x7c\x0e\x27\x89\xe5\x1d\x7c\x45\xcd\xa8\x53\x94\x9c\x74\x40\xaa\x54\xce\x3c\x46\x64\x5a\x96\x58\xfe\x0d\x00\x00\xff\xff\x5a\x81\xee\x7e\x7f\x03\x00\x00")
func complySoc2NarrativesProductsMdBytes() ([]byte, error) {
return bindataRead(
@@ -417,12 +417,12 @@ func complySoc2NarrativesProductsMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/narratives/products.md", size: 232, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/narratives/products.md", size: 895, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2NarrativesSecurityMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xb1\x4e\xc3\x40\x0c\x86\xf7\x7b\x8a\x5f\x62\x4e\x45\x3b\xb4\x28\x1b\x8a\x90\x00\x41\x07\xd2\x85\xf1\xb8\x98\xc6\x34\x67\x23\x9f\xd3\x2a\x6f\x8f\x92\xa1\xea\xc6\xe4\x4f\xbf\xe5\xcf\xb6\xc4\x4c\x35\x5a\x4a\xa3\xb1\x4f\x78\xb4\xd4\xb3\x53\xf2\xd1\x08\xfb\x68\x16\x9d\xcf\x14\x62\x32\x95\x29\xd7\x68\x9f\xf6\xa1\x44\xe7\xf2\xcd\x54\xea\x00\x1c\xda\x66\x2e\x40\x85\xa6\xd9\xae\xb6\x37\xbc\xbb\xf2\x6e\xb5\xbe\xe1\x4d\xc8\xf1\x47\xed\x83\xce\x5c\x58\x65\xd1\x54\xe8\xa2\x53\x8d\xd7\x51\xb0\xc6\xe6\x7e\xfd\xb0\x0c\x24\xcd\x99\xc4\x6b\xbc\x08\x3b\xc7\x01\x9d\xa6\x71\x4e\x42\x55\x55\x21\xdc\xfd\x7b\x78\x78\x26\x23\x5c\x08\xb2\x44\x84\x4b\x3f\x41\x47\x83\xda\x11\xd7\x47\xe0\x3d\x21\xa9\xb8\xe9\x80\x13\x4d\x05\x03\x17\xa7\x0e\x2c\x4b\xeb\xf3\xfd\x0d\x5f\x83\xa6\xd3\xbc\xf2\x40\xf9\x77\x98\x55\x8d\x66\x96\x23\x5a\x55\x09\x7f\x01\x00\x00\xff\xff\x71\xb6\xff\x0e\x47\x01\x00\x00")
var _complySoc2NarrativesSecurityMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x57\x5f\x6f\xe3\xc6\x11\x7f\xe7\xa7\x18\xc0\x40\x0a\x38\x96\xae\x97\x87\xa4\xf0\x9b\xaa\xbb\x22\x6e\x7d\x67\xc3\x32\x72\xe8\xe3\x88\x1c\x89\x5b\x2d\x77\xd8\x99\xa5\x54\x26\xca\x77\x2f\x66\x97\x94\x68\xdd\xf9\x82\xb4\xc8\x9d\x01\x2d\x97\xcb\xf9\xf7\x9b\xf9\xcd\x6c\xc0\x86\x6e\x61\x45\x65\x27\x2e\xf6\xb0\x90\xb2\x76\x91\xca\xd8\x09\xc1\x47\x14\xc1\xe8\xf6\x54\x60\x29\x1c\xfa\xe6\x16\x56\xef\x3f\x16\x8a\xd1\xe9\xc6\x91\xde\x16\x00\xcf\xab\xa5\xfd\x00\xcc\x60\xb9\xfc\x7e\xfe\xfd\x64\xfd\xc3\x69\xfd\xc3\xfc\xed\x64\xfd\x5d\xd1\xe0\xbf\x58\x9e\x68\xef\xd4\x71\x48\x62\x66\x50\x61\xa4\x5b\xf8\x7b\x17\xe0\x2d\x7c\xf7\xe7\xb7\x7f\x49\x1f\x94\xdc\x34\x14\xe2\x2d\xdc\x05\x17\x1d\x7a\xa8\xb8\xec\x6c\xa7\x98\xcd\x66\x45\x71\xf5\x9b\x86\x17\x3f\x92\x10\x1c\x08\x42\xda\x22\x38\xd4\x3d\x70\x27\xc0\xb2\x85\x93\x23\x10\x6b\x82\x92\x43\x14\xf6\xb0\xa3\x5e\xc1\x3b\x8d\x54\x81\x0b\xe9\xd5\x3f\x3f\xdc\xc3\xda\x73\xb9\x33\x95\xbf\xfc\x32\xff\x88\x0d\xfd\xfa\x2b\x3c\x0a\x57\x5d\x19\x5f\xe8\x2e\x8a\x77\xa4\xa5\xb8\x35\x41\x3b\xbc\xc6\xa9\x69\x35\x09\xdd\x00\x35\x6d\x8d\xea\x7e\x76\x61\x0b\x3a\xba\xe0\x9a\xd6\xbb\x12\xa3\xc5\xe4\xa5\xa2\xbb\xb0\x11\xd4\x28\xdd\xa0\xe2\xea\xea\xa4\xfb\xf2\xd5\x67\xda\xdd\x8b\x03\xaf\xa8\x6e\x08\xb5\x13\x32\xb5\x57\x57\xb0\xe8\x62\xcd\xe2\x7e\xa6\x0a\x1e\x49\x94\x43\x20\x5f\x14\x33\xb8\xbe\x5e\x7c\x5a\x81\x30\x47\xc0\xb2\xe4\x2e\xc4\xeb\x6b\x5b\x91\x2a\x38\x85\xad\x60\xb0\xa0\x71\xf0\x3d\x44\x4e\x91\x5b\x3e\x3f\x00\x86\x0a\x96\xef\x1f\x4e\x02\xee\x16\x1f\xbe\xf8\x9d\x7d\xc2\x80\xe0\x5d\xe3\xec\x79\x2b\xdc\xb5\xc0\x1b\xb8\xbe\x7e\x68\x49\x30\xb2\xe8\xf5\x75\x12\x73\x0e\xcd\x6a\xf5\xe3\x6b\xc2\x7e\xa7\xa4\x77\x7f\xfd\x5d\x82\xde\x61\x44\x98\x4a\x33\x54\xee\x9e\x3f\x03\xe4\xac\xa0\xd3\x21\xd3\x36\xec\x3d\x1f\x0c\x80\xd2\x73\x57\x81\x92\xec\x5d\x49\x0a\x1b\x16\x70\x51\xc1\x85\x48\x12\xd0\x5f\x80\x77\x6b\x20\xdc\x3b\x8d\x17\xdf\x15\xc5\x22\x9b\x9d\x83\xae\x74\x29\xd7\xe9\xc9\x
03\x03\x4e\x2a\xd3\x3d\x20\x24\xec\xc9\x3c\xb2\xf5\xd9\x56\x6a\x5a\xcf\x3d\x51\x02\xcf\x29\x08\xed\x1d\x1d\xa8\x82\x7f\x77\x28\x91\xc4\xf7\x80\x0a\x07\xf2\xde\x7e\xf7\x0e\x41\x68\xdb\x79\x14\xe0\xb0\x66\x4c\x1a\xde\xf0\x66\x33\xae\x21\xa2\xee\xb2\x83\x81\x0e\x49\x6a\x45\x2d\x4a\xb4\x77\xa3\x32\x9d\xbf\x4c\xfb\x4f\x2c\x3b\x8d\x63\x41\x9c\xf7\x0f\x93\x7d\x40\x2b\x2a\x94\x8a\x82\x79\xb7\x45\x17\x34\x82\xe7\xad\x2b\xd1\x27\x3d\x6d\xdd\x6b\x7e\x88\x11\xcb\x1d\xac\xfb\x0b\x10\xc6\xe4\x4f\xe1\xe5\x84\x68\x2a\x8e\x5e\x23\x35\xd0\x74\x1a\x61\x4d\x70\x70\xb1\x76\x01\x38\x10\x6c\x29\xa4\x43\x1c\x2c\x70\x65\x27\x92\x18\x09\x36\x9d\xf7\xb3\xca\xe9\x0e\x28\x94\xd2\xb7\x76\xc2\x44\xe6\x88\x00\x86\xe8\xf6\x4e\x3a\x7d\x63\xab\x06\xfd\xc1\x6c\x57\xde\x44\x5b\x14\x33\x78\x58\x25\x83\x17\x3f\x01\x76\x91\x1b\x8c\x66\xb6\xef\xa1\x6b\x8d\x1c\xab\xa2\x98\x04\xc4\xa8\xb1\xf5\x0e\x43\x99\x4d\x1b\x80\x1f\x7d\x31\xcc\x68\x8f\xbe\xc3\x5c\x91\x80\x13\xe4\xd6\xa8\x2e\xc5\xfa\x0a\x9e\xa8\xe1\x48\x90\xf3\xa7\x28\x3e\x60\xe8\xbf\x90\x05\x9a\x42\x0e\x92\x0e\xfb\x3e\xcb\x1b\x01\x4f\xd2\x92\xe1\xa5\x31\x45\x19\x2d\xb3\x06\xf6\x31\x43\x53\x06\x8d\x09\x7d\xf7\x3c\xc4\x35\x27\x8d\x01\xa1\xd8\x98\xdd\xb1\xe6\x4a\x2d\x99\x62\xcd\x4a\x13\xd5\x83\x54\x83\x64\x23\xdc\x5c\x24\xea\x09\x5d\xde\x6c\x5c\x49\x37\xe0\xe6\x34\xbf\x81\xca\x89\x59\x32\xe0\x90\xf3\x7e\xa8\x90\x97\xb5\x31\x87\xbb\x68\xc1\x32\xa9\xa3\xce\x3f\x59\xbe\x6b\xcb\x41\xdd\xda\x79\xe3\xc7\xc8\x40\xc1\x02\x0b\xb1\xc6\x98\x19\x0e\xcf\x2c\xd9\x8e\x2c\x69\x35\x3e\x31\x4e\x48\xb9\x13\xab\x40\x0b\xc2\x60\xc2\xf9\xf5\x10\x89\x94\xf5\x43\x05\x3f\xa5\x3a\x9b\x16\xf4\xf9\xf8\x25\x93\xaf\x39\xd6\xe7\xc8\xa6\x54\xcf\x51\xbf\x79\xad\x62\x13\x12\x58\x5a\x67\x34\x4b\x25\x57\x8f\xc1\xba\xa7\x6a\x0e\x8b\x60\x67\xb8\x41\x6f\x3d\x31\xbf\x6a\x59\x46\x6e\x36\xac\xc6\x86\x11\x09\x9b\x54\xcd\x9b\x4e\x62\x4d\x02\x2e\xec\x49\xa3\xdb\xa6\xe4\x9c\xc3\xa7\x9a\xc2\x04\x43\x8d\x28\x11\x58\x86\xa2\xbf\x01\x0c\xaf\x11\x45\x2b\x5c\x52\x65\xa1\x76\x3a\x14\x69\xd6\xdf\x0a\xe7\x81\x
61\x90\x73\x7a\xc4\xb6\x15\x6e\xc5\x59\x7b\x1f\xfa\xd2\x10\xeb\x14\xd9\x47\x0a\x14\x87\x72\x7d\x36\x1b\xc3\x76\x4a\x26\x36\x62\x38\xd5\xcc\x25\x01\xe8\x3f\x43\x3c\xdb\xc9\x67\x91\x34\xa6\xa4\xb7\xbf\xd0\xa1\x1f\x4a\x08\x16\xde\xc3\xc6\x05\x33\x3c\x07\xcc\x35\x0d\x55\x66\x89\xef\xcf\x08\x24\xf0\xab\x4a\x48\x75\x12\xca\x34\x7c\x60\x39\xd2\xc8\xd0\x2d\xdf\x2c\xdf\x3f\x5c\xd0\xe0\xe3\x98\xe2\xe3\xb0\x33\x35\xbf\x46\x4d\x9c\x74\xaa\x03\xcf\x79\x84\xb8\xb1\xe1\x65\x85\x01\xfe\x26\x18\x4a\xa7\x25\xdf\xc0\x72\x31\x87\x7f\x50\x0f\x4e\xb5\x4b\xcc\x61\x89\x2f\x58\xee\xa8\x1a\x69\xf1\x21\x95\xd1\xe7\x3a\xe1\x91\xbd\x2b\x7b\xb8\xa7\x6a\x4b\x32\x1f\xcf\xa5\x61\xc9\x1c\xc7\xaa\x72\xa6\x36\x11\x56\x4d\xfe\x24\xd0\x93\x2a\xcb\x8d\xc1\xd7\x92\xd8\xac\x81\x01\xb7\x64\x43\xdc\x4d\x66\x8d\x4e\x23\x57\x36\xdc\x69\xc4\xcd\x66\x0e\xcf\x89\xc9\x4e\x92\x03\xc7\xff\xd5\xc8\x73\x98\xb2\x52\xd1\x91\xb5\x4e\xf0\x4c\xda\x43\x2e\xb9\x56\xdc\xde\x79\xda\xa6\x6e\xf4\x5a\xf1\xa5\x8e\x6a\x81\xa6\x6a\x6c\x0c\x8b\x4f\xab\xa9\xc2\x8a\x49\x93\xe9\x35\xee\xe9\x33\x25\x91\xed\xfc\x85\xd0\x04\xfb\x93\xb5\x8f\x85\x2a\xa9\xa6\x39\x77\x3a\x3f\xa4\x36\xa0\x69\x42\x58\xf6\x6b\x92\xcb\xc3\x5f\x48\x51\x4b\x02\x96\x8a\xc4\x54\xee\x88\x5a\x68\x71\xd2\x31\x80\xf6\xec\xf7\xa9\x3f\xd7\x42\x18\xc1\x63\xa8\xb4\xc4\x96\x12\x0c\x93\x26\x99\x28\x3e\x55\x79\x88\x2c\xbd\x65\x2c\x56\x7b\x12\x45\x71\x03\xfb\x04\x0e\xb3\xe9\x5e\x16\x69\x9c\xae\xa7\xcc\x5f\xa7\x59\xc3\x35\x46\x2b\x29\x03\xa7\x0c\x97\x5b\xd2\x62\x22\xe2\x39\x8b\x28\x8a\x97\xc6\x08\xb5\x42\x4a\x21\x66\xde\x7e\xd5\xaa\xc1\x82\xdb\xa2\x38\x66\x49\xc7\x55\xe2\xe3\xe3\x4f\x54\x46\x96\xe3\x33\xca\x96\xe2\xf1\xde\xed\xc8\xbb\x9a\xb9\x3a\xae\x68\x4f\x96\x49\xc7\xe2\x38\xfb\xca\xbf\x6f\xbf\xf2\xf8\xed\xd7\x4e\x0f\xcb\x63\x71\x84\xc9\xff\xe4\xf7\x47\x0e\xb3\xff\xd3\xf7\x57\x10\x98\xf8\xff\x47\x3a\xfe\x5b\x7e\xa6\xf4\xa6\x0d\x09\x85\x32\x5f\x35\xce\x17\x35\x2d\x8a\xe1\x46\x93\x7b\xe5\x6a\x1c\x5d\xcf\x57\xb9\x55\x9e\xc7\x5e\xbb\xe8\xd9\x9d\xc8\xaa\xdf\x99\xac\xf7\xa7\xf9\x6b\x
a0\x84\xe2\x9e\xb7\xf0\xe1\xc4\x3c\xe3\xee\x40\x24\x17\xfc\x51\xbc\x98\x8a\xc6\xcd\xd3\xa1\xbb\x50\xba\xca\x84\x3c\xe5\x21\x81\xc6\x13\xd3\xf1\x6c\xd8\x1a\xae\x6a\xb9\xa3\xd9\xa0\xde\xb6\xbe\xb7\x41\xef\x11\x63\x59\x93\x16\xb9\xe1\xc3\x37\xb0\xf4\x84\x02\xf7\x7c\x98\x3d\x8a\xe3\x7c\xa7\xf5\x24\xf1\x74\x64\x98\xd1\x86\xa7\x77\x94\xe3\xf3\xcd\xcb\x29\xf9\xbf\x01\x00\x00\xff\xff\xa2\xda\x57\x64\xcf\x0f\x00\x00")
func complySoc2NarrativesSecurityMdBytes() ([]byte, error) {
return bindataRead(
@@ -437,7 +437,7 @@ func complySoc2NarrativesSecurityMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/narratives/security.md", size: 327, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/narratives/security.md", size: 4047, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -457,7 +457,7 @@ func complySoc2NarrativesSystemMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/narratives/system.md", size: 257, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/narratives/system.md", size: 257, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -477,12 +477,12 @@ func complySoc2PoliciesReadmeMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/README.md", size: 71, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/README.md", size: 71, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesAccessMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4c\x8e\x31\x6a\xc4\x30\x10\x45\x7b\x9d\xe2\x43\x6a\x85\xd8\x81\x10\xd4\x19\x55\x49\x63\x13\xfb\x02\x13\x49\xbb\xcc\x62\xcd\x80\x24\x2f\xf8\xf6\x8b\x5d\xb9\xfa\x8f\x0f\x0f\x9e\x50\x4e\x0e\x43\x08\xa9\x56\x8c\xf2\xaf\x54\x22\xcb\x1d\x24\x11\x4b\x2a\x99\x85\x1a\xab\x60\xd2\x95\xc3\x6e\x28\x14\x95\x3d\x3b\x0c\xe3\x32\x99\x4a\x8d\xeb\x8d\x53\x75\x06\x58\x66\x7f\x0c\x60\xe1\xfd\xd7\x7b\x77\xe1\xfe\xc2\x9f\x26\xd3\x43\xcb\x5f\x7a\x72\x65\x95\x53\xb5\x88\xd4\x92\xc3\xef\x26\xe8\xd0\x7f\x74\xdf\xa7\x10\x34\xe7\x24\xcd\xe1\x47\xb8\x31\xad\x88\x1a\xb6\xe3\x31\xd6\x5a\x63\xde\xe0\x35\x1f\xad\xb3\xaa\xbc\x02\x00\x00\xff\xff\xcd\x46\x92\x04\xc8\x00\x00\x00")
var _complySoc2PoliciesAccessMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x55\xc1\x6e\x1b\x47\x0c\xbd\xeb\x2b\x08\xe4\xd2\x1a\x91\x5a\xa7\x40\x51\xe8\x96\xba\x05\xec\x02\x45\x0c\x45\x40\xce\xd4\x2c\x57\xcb\x68\x76\xb8\x20\x67\xa5\xca\xa7\xfe\x46\x7f\xaf\x5f\x52\x70\x66\x25\xcb\x6e\xec\xe4\xa4\xdd\x19\x91\x7c\x8f\xef\x91\x9b\xb0\xa7\x25\xbc\x0f\x81\xcc\xe0\x43\xda\x08\x6a\xc3\x69\x0b\x98\x1a\x58\x93\xf6\x9c\x30\xb3\x24\xb8\x97\xc8\xe1\x38\xc3\xa0\x92\x8e\xfd\x12\xde\x7f\x58\xdf\xcf\x0c\x33\x5b\xcb\x64\xcb\x19\xc0\xfa\xe3\x8d\xff\x00\xcc\xe1\xe6\xe6\xe7\xc5\xf5\xc5\xf3\xbb\x8b\xe7\x9f\x66\x3d\x7e\x16\x5d\xd1\x9e\x8d\x25\x95\xd0\x39\x34\x98\x69\x09\x7f\x8c\x09\xae\xe1\xdd\x8f\xd7\xbf\x94\x80\x20\x7d\x4f\x29\x2f\xe1\x2e\x71\x66\x8c\xd0\x48\x18\xfd\x64\x36\x9f\xcf\x67\x6f\xe0\x7e\xd4\x41\x8c\x0a\xd6\x8f\x41\x06\x9a\xcd\x70\x01\xeb\x8e\x60\x98\x6e\xa4\x85\xdc\xb1\xc1\x50\xd0\x03\x1b\x64\x81\x86\x5a\x4e\x04\x83\x4a\xa0\x66\x54\x2a\x87\x52\xa9\x97\x5c\xd2\xb6\xf5\x65\x34\xd2\x72\x9b\x29\x74\x89\x03\x46\xe0\xd4\x2a\x5a\xd6\x31\xe4\x51\x09\x38\x01\x42\x8f\x29\x91\x42\xee\x30\x43\xcf\x89\x7b\x7e\xf0\xa4\x1d\x81\xb2\xed\x1c\x04\xa7\x56\xb4\xaf\x9d\x8c\x62\x06\xa2\x40\x7f\x0d\x62\xa3\xd2\x02\x26\xd8\x8f\x38\x71\x18\x22\x57\x5c\x18\xe3\xcb\xd5\x0f\x9c\x3b\x4e\xa5\x92\xe8\x16\x13\x3f\x94\x12\x5f\xcf\xd8\x8e\x31\xce\x33\xf7\xb5\x77\x03\x6a\xae\x6f\xd4\x0f\x51\x8e\x44\x56\xce\x83\xa4\xac\x18\xb2\xa8\x79\xca\x37\xf0\x2b\x86\xdd\x56\x65\x4c\x4d\x29\x70\x97\x40\xb4\x71\xe6\x72\xe6\xfd\xcd\xb4\xe1\xbb\x56\xa5\x87\x8d\xe4\x0e\x38\x19\x37\x15\x8b\x8c\xb9\x3c\x3f\xe7\xf4\xfd\xdb\xff\x1d\xb9\x9c\x4a\x91\x31\x65\x90\xda\x85\x41\x39\x05\x1e\x62\x51\x3e\x12\x5a\xf6\xa3\x3d\x47\xda\xd2\xc2\x5d\x2e\x63\xca\x10\x94\x6a\x82\x42\xde\x4d\x6e\x56\x20\xd2\x9e\xa2\x01\x2a\x81\x92\x65\xe5\x90\xa9\xa9\xe6\x88\xc7\x4a\x8c\x4c\x46\x0d\xde\x9f\x8d\x49\x1c\x33\xc5\x23\x24\xa2\xa6\xfe\x6f\x20\x75\xc2\x40\x18\x3a\x7f\x31\x49\xff\xfe\xfd\x8f\xc1\x67\xd9\x40\x33\x66\x26\x5b\xc0\xa7\x8e\xdc\x33\x6e\x
ad\x72\xa7\x12\x5f\x14\x12\x42\x87\x69\x4b\xe6\xdc\x8b\xd1\x2b\x01\x7b\x05\x78\x8d\x68\x7e\x50\xda\xcb\xae\xa2\x6a\x39\x97\xcc\x89\x0e\xb5\x9a\x47\x37\x6c\xb8\x89\xd4\xc0\xc1\xf1\xf8\xb5\x23\xf2\x9e\xed\x27\xef\x3e\x01\x82\x31\xcb\x96\x72\x47\x5a\x9d\x30\xad\x03\x77\xc1\xd5\x6f\xa3\xfa\xc6\x90\xf3\xf2\x58\x5e\xcd\xca\xfc\xf2\x02\x6e\xb9\xdc\xfd\x89\x09\xb7\xa4\x93\x21\x0c\x6e\x57\x30\x0e\x92\xa0\x63\x2d\x52\x61\x01\x77\x72\xdf\xe2\x31\x7c\x05\xd4\x23\x47\x83\xbb\xb5\x53\xa9\xf1\x0e\xaf\x7f\x0c\x2b\x49\x9c\x53\xee\x88\xb5\x50\x74\x90\x53\x8a\xbb\x75\xd5\xdb\x35\x83\xd0\x51\xd8\x45\xb6\x5c\xa2\x5f\xef\xe6\x24\x6b\x2b\xd3\x64\x3f\xcb\xeb\x6b\x46\x0e\x3e\xf7\xd2\x56\xc1\x4f\xe6\x00\xa5\x3d\xd3\xa1\xe6\xc5\x61\x50\xf1\x96\xe2\x97\xcc\xe7\x7d\x9e\xa1\x99\x04\x46\xf7\xda\x23\x0a\x7b\x4a\xe1\x20\xba\xb3\x62\x93\xaa\xcd\x97\x0b\x67\x01\xa3\x0c\xe3\x70\x16\x74\x5a\x05\x67\x8d\xa6\xcd\xf6\xaa\x48\x49\x32\xfb\x52\xf7\xee\x17\x77\x60\x3a\x4b\x03\x1d\x1a\x6c\xc8\x2d\x33\x7d\x1c\xa8\xb9\x40\x7a\xbb\x02\xa3\xd4\x78\xab\x0f\x44\xbb\x78\xac\xf2\x81\xd2\x20\x9a\x1d\xdf\xdd\x1a\x6c\xec\x7b\x54\x7e\xf0\x9a\x27\x2d\xa6\x4d\x7b\xce\x59\xba\xc3\xa9\xae\xba\x93\xfa\x93\x67\x27\x9d\xb1\x7c\xb3\x9e\xb6\xe9\x9c\xc0\xa6\xeb\xd3\x64\xb5\xbc\x27\xd8\x8c\xc6\xc9\x0f\x1b\x3c\x1a\x94\xf5\xa3\x14\x88\x87\x02\xa1\xd2\x0e\x97\x1b\xf4\xea\xd3\x73\xfa\xd3\x40\x16\x33\xd8\x4b\x63\xfb\x72\x6b\x0f\x1c\xe3\xc9\xc4\xb7\xab\x6a\xe1\x9a\xd2\xbf\x24\xc5\x61\x25\xf2\xa2\x9f\xde\x08\x37\x80\x47\xb6\x12\xa3\x1c\x4a\x3d\xc3\x9e\xc0\x32\x0d\x06\x68\xbe\x35\x23\x27\xf2\x96\x55\x34\x4f\x3f\xe4\x17\xb2\x5f\x7c\xf6\x4e\x24\x57\xc5\xae\xaf\x4c\x84\x5d\x10\xfa\xdd\x0d\xd7\x4b\xca\xdd\x5b\x87\xe5\x7f\x74\x9b\x38\xba\x6a\xfb\xaf\x8d\x95\xcf\x13\x86\x30\x2a\x86\xa3\x23\x98\xfd\x17\x00\x00\xff\xff\x61\xc4\x99\x8a\x82\x08\x00\x00")
func complySoc2PoliciesAccessMdBytes() ([]byte, error) {
return bindataRead(
@@ -497,7 +497,7 @@ func complySoc2PoliciesAccessMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/access.md", size: 200, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/access.md", size: 2178, mode: os.FileMode(420), modTime: time.Unix(1545087106, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -517,12 +517,12 @@ func complySoc2PoliciesApplicationMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/application.md", size: 8377, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/application.md", size: 8377, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesAvailabilityMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\x8b\xc1\xaa\xc2\x30\x10\x45\xf7\xf9\x8a\x0b\x6f\x9d\xf2\xe2\x4a\xb3\x2b\x59\xe9\xaa\x58\x7f\x60\x4c\xa3\x8c\x24\x33\xd0\xa4\x85\xfe\xbd\xb4\xb8\xba\x97\xc3\x39\x42\x25\x79\xf4\x2b\x71\xa6\x27\x67\x6e\x1b\x06\xcd\x1c\x37\x43\x71\x56\xd9\x8a\x47\x3f\x98\x4a\x8d\xeb\x8b\x53\xf5\x06\x78\x8c\x61\x1f\xc0\xa2\x77\x9d\xfb\xdd\x10\x2e\x9d\x33\x85\x3e\x3a\xdf\xd3\xca\x95\x55\x0e\xdb\x62\xa2\x96\x3c\x6e\x8b\xc0\xe1\xf4\xef\xce\x47\x10\xb5\x94\x24\xcd\xe3\x2a\xdc\x98\x32\x26\x8d\xcb\x4e\x8c\xb5\xd6\x98\x3f\x04\x2d\x2c\x6f\x8c\xaa\xf2\x0d\x00\x00\xff\xff\xcb\x6e\xaa\x43\xa4\x00\x00\x00")
var _complySoc2PoliciesAvailabilityMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x41\x6f\x5b\xb9\x11\xbe\xeb\x57\x0c\xb0\x17\x2b\x6b\xab\x71\x92\x43\x6b\xa0\x07\xaf\x8b\xa2\x2e\xbc\xdb\x20\x76\xb1\xe7\x11\xdf\x48\x8f\x35\x1f\xf9\xc2\x21\xad\xbc\x20\x87\xfc\x8d\x02\xed\x9f\xcb\x2f\x29\x66\xc8\x27\x3d\x59\x72\x92\x5d\x6c\xb1\x7b\x58\x5d\x6c\x51\xe4\x70\xf8\xcd\x37\xdf\x0c\xe9\xb1\xa3\x0b\xb8\x7c\x40\xeb\x70\x69\x9d\x4d\x03\xbc\x0e\xce\x9a\x61\x86\x26\x06\x3f\x74\x17\x70\xf9\x7a\xc6\x98\x2c\xaf\x2c\xf1\xc5\x0c\xe0\xee\xf6\x4a\xfe\x00\x9c\xc1\xe5\xf9\xe2\xbc\xfe\x7b\x75\xf5\xa7\xc5\xf9\xac\xc3\x7f\x85\xf8\x86\x1e\x2c\xdb\xe0\x75\xf6\x19\x34\x98\xe8\x02\xfe\x9e\x3d\x9c\xc3\x8b\xe7\xe7\x7f\xd4\x05\x26\x74\x1d\xf9\x74\x01\xd7\xde\x26\x8b\x0e\x9a\x60\xb2\x8c\xcc\xce\xce\xce\x66\xb3\x6f\xe0\x75\x8e\x7d\x60\x02\xf4\x0d\xdc\x9a\xd0\xd3\x6c\x86\x0b\xb8\x6b\x09\xfa\xfa\x4b\x58\x41\x6a\x2d\x43\xaf\xfe\x82\x65\x48\x01\x1a\x5a\x59\x4f\x10\xe9\x6d\xb6\x91\xc4\x20\xc3\x2a\x44\xe8\x63\xe8\x29\x82\x09\x3e\xc5\xe0\x74\x6a\x1f\x43\x22\x93\x20\xb5\x04\x38\x05\x40\x0d\x13\x84\xb8\x46\x6f\xdf\x63\xb2\xc1\x7f\xfa\xf8\x6f\x06\xeb\x57\x21\x76\xfa\x1d\x78\xe0\x44\x1d\x2f\xaa\x57\x3b\x37\xb0\xef\x9d\x25\xdd\x00\x9d\x83\xcc\x14\x59\x2c\x1e\x59\x0c\x1b\x9b\x5a\xeb\x0f\x36\xab\x06\xd3\xd0\x5b\x83\xce\x0d\x60\xbd\x71\xb9\x21\x06\xea\x7a\x17\x06\x22\x56\x58\xf4\x30\x68\x52\x88\x7c\x0a\xc8\xb0\x21\xe7\xe4\x2f\xfa\x01\xe8\x5d\xa2\xe8\xd1\x41\x8f\x31\xa9\x3f\x2d\x26\x41\x9d\xc0\xfa\x14\x74\x2d\x9a\xa4\x2e\x6c\xfd\x11\xa3\x53\x3f\x2b\x5a\x8e\x1a\x58\x0e\x07\x6e\xc2\x49\x4b\x91\xac\xc7\x55\xa2\x08\x91\x56\x14\x23\x35\x7a\x70\x86\x4f\x1f\xff\xa3\x47\xff\xf4\xf1\xbf\xf3\x7d\x80\xba\xcc\x09\x96\x04\x1d\x36\x12\x27\x6c\xac\x1b\x46\xfc\x1d\xed\xe1\xb6\x10\x22\x7c\x87\xe6\x7e\x1d\x43\xf6\xcd\x96\x01\xd6\x27\xf2\xe9\x38\x01\x3a\xeb\x6d\x67\xdf\x53\x09\x6b\x17\x72\x99\x98\x3d\xbd\xeb\xc9\x24\x6a\x20\x44\xc8\xbe\x77\xe8\x3d\x35\xd0\x84\x8d\x4f\xb6\x23\x38\x41\xc7\x01\xee\x7d\xd8\x78\xf1\x3f\xe4\x84
\x6b\xe2\xf9\x53\xa1\xcb\xbe\xa1\x78\x9c\x26\x15\xb5\xfd\x53\xf7\x91\xd8\x44\xbb\x24\x06\xee\xc9\xd8\x95\x35\xd0\x11\x72\x8e\x54\x08\x7a\x80\xae\x06\x6c\x63\x9d\x93\xf0\x47\x42\xa6\xba\x39\x44\x6a\xb2\x6f\xd0\x9b\xe1\x54\xa0\x88\xa1\xc9\x86\x60\x85\xd6\x85\x07\x8a\xd0\x91\x69\xd1\x5b\xee\x84\x15\x12\xd1\xae\x77\x9a\x09\xd0\x05\x6f\x53\x88\xd6\xaf\x81\xb3\x69\xcb\x16\xf5\xa4\x80\x91\xc4\xcb\x07\xf2\x02\x12\x32\x74\x32\x05\xe5\x04\xcc\x76\xe9\x68\x01\x3f\x4a\xc0\xc5\xd3\x01\x0c\x7a\x1f\x34\x8e\xdb\x35\xa7\x5b\x53\xea\xf5\x92\xe0\x6d\xb6\xe6\xde\x0d\xd0\x50\x2a\xd0\x8b\x3b\x92\x95\x8d\xc5\x44\x4d\x49\x9e\x1f\xc7\x24\xd8\x82\x25\x6e\xef\x67\xa4\xe5\x9a\xd8\xea\x18\x82\x69\x51\x88\x4f\xd1\x72\xb2\xe6\x71\x8c\x42\x3c\x1a\x32\xeb\x61\xd3\x5a\xd3\x96\xb3\x3f\x5a\x30\x4e\x32\xe8\xc5\x73\x34\x86\x98\x0b\xef\x31\xa7\x36\x44\xfb\x9e\x1a\x20\x9f\xac\x66\xd3\xa6\x25\x4f\x02\xb6\x27\x6a\xf4\x24\xdf\xc0\x1b\xc9\x00\xf2\x86\x58\xcf\xf5\xc6\xf2\x3d\x5c\x32\x13\xb3\x82\x5f\x65\x55\xa4\xad\xfe\x87\x0b\xb8\x3e\xe2\xe7\x98\x20\x26\x78\xb6\x2c\x4c\x7f\x9c\x20\x26\xf8\x26\x9b\xa4\x70\x72\xee\xfb\x10\x13\x2c\x33\x5b\x4f\xcc\x20\x3a\xa7\x06\xab\x38\x3d\xb9\x45\x8b\x0f\x04\xb8\x03\x76\x0a\xb8\x71\xc8\x2c\x24\xd5\x75\xa7\x45\x26\xb0\x17\x15\x8d\x12\xbb\x9d\x90\x92\x17\xaf\x9a\xaa\x1d\x26\xc4\x3e\x44\x09\x6e\x91\x99\x86\x1e\xc8\x85\x5e\x01\x90\x19\xbd\x92\x55\x7d\xe9\x63\x50\x8c\x19\x96\x28\x48\x87\xca\x81\xfd\xad\xcb\x19\x6e\x0b\xf1\xc5\x82\xa7\xb4\x09\xf1\x5e\xe9\xae\xc9\x33\xc2\x15\x49\x70\x20\xdd\xa3\xeb\x05\xb3\x14\x8e\x27\xa8\x23\x6c\x34\xe7\xa6\xd0\xdc\x91\x69\x7d\x70\x61\x3d\xc0\xc9\xf5\xdd\x5c\x38\xd1\x10\xdb\xb5\xd7\xc3\x5c\xdf\x4d\x70\x85\x0e\x3d\xae\x29\x16\xdf\xfe\xa9\x0a\x3f\x7a\xe1\x43\xb2\x2b\x2b\xc7\x59\x01\x9b\x96\x9a\x2c\xe0\x8c\x89\x71\x42\x8b\xf5\xe2\x74\xcc\xe3\x0e\x55\xc8\xd0\x1b\x9a\x97\x5c\xac\x65\x0b\x7a\x8a\x36\x34\x5a\x37\x46\x81\xaa\x72\x52\xec\x17\x6c\xca\xa6\x45\x4e\x8a\x3c\x4b\xa9\x55\x94\x54\xd2\x6a\x1d\x3b\xdc\xed\x14\xb6\x62\xd8\xe4\x58\x43\x2c\xcb\xd0\x27\x6b
\x6c\xaf\x47\xae\xcb\x24\x37\x28\x3e\x58\x23\x08\x73\xee\xfa\x22\x4e\xe2\x91\x1e\xff\x75\xb4\xa2\x5e\x61\x1a\xda\xcc\xb2\x05\x9a\x16\x3c\x6d\xd4\x82\x5d\x7b\x75\x5b\xb9\xdc\x85\xa6\x60\xa4\xb5\x72\x7a\x96\x4a\x49\x13\x44\xb2\xc4\x89\x28\x49\x84\xbb\x24\x52\x94\xb6\xe5\x70\x8f\xb2\x32\x95\x6b\xda\xed\x56\x4c\xd3\x69\x34\x6a\xbd\xe4\x77\x88\x22\xa1\x54\xa8\x2d\x38\x1d\x4f\x58\x38\x89\x63\x5e\xc3\x09\xce\xe7\xe5\xd4\x57\xd8\xa3\x91\x5d\x0b\x15\xb6\xfc\x76\x01\x1b\x58\xa2\x43\x6f\x44\x65\x93\x90\xca\xbe\xcd\x13\x9a\x66\x16\xa5\x44\x11\x34\x11\x42\xf0\x24\x59\x80\x71\x38\x15\x10\x5b\x72\xfd\x7e\x09\x2b\x08\x14\x19\x97\x82\x2d\xbc\x2a\x91\x19\x33\xe0\xe9\x34\x9f\x82\xea\xf7\xd2\xb7\xc1\x84\xb0\x44\x73\x9f\x7b\x90\x52\x58\x80\x25\xaf\xf5\xe8\x62\xa6\x2d\x9a\x5d\xc0\xa5\x73\xc0\xe4\xd9\x26\xfb\x50\x17\x55\x81\x8c\xc4\x29\x48\xb9\xaf\x3d\x0c\x4a\x29\xe7\xe0\x8b\x44\x09\xfb\x0a\x87\x17\x5b\x5b\x7f\xcd\x52\x14\x74\x4b\x25\xb6\x89\x36\x49\x7f\x23\xa6\x42\x8e\x66\x2c\x41\x14\xe5\x20\x45\x10\x30\x49\xb2\x72\x02\x84\x0d\x91\x94\x92\x25\xb2\xe5\x9d\xd1\x6b\x29\x8d\x82\x3e\xee\x6c\x4b\x66\xff\x44\xe3\x0d\xda\x43\xdb\xdf\x55\x7b\x9a\x18\xcc\xc1\x68\xd1\x02\xad\x5e\x6a\x4d\x33\x0a\x55\x3c\x65\x4f\x2c\x81\xcb\x5d\x6d\x4c\x62\x1a\xe0\xe4\xe5\xf3\x39\x34\x38\x70\x2d\x7d\xd3\xe9\xe3\xf6\xc1\x13\x9c\x9c\xcf\x61\x20\x8c\xa7\xa5\x78\x1d\xf0\xd3\xd1\x1a\x5d\xb5\xb1\xce\x0e\x53\x88\xc3\x5e\x93\x7b\xc4\xed\x48\x50\x63\x14\x56\xab\x33\xb6\xa9\xda\xea\xb2\x4b\xb6\x77\x04\x7d\xb0\x92\x20\x61\x35\xe9\x28\x46\x95\xae\x05\x5b\x8a\xca\x1a\xc8\x9b\x38\x94\xc4\x97\x9f\xef\x69\xca\xfb\xdd\xce\x77\xc4\xc5\x5c\x25\x96\xf2\x65\x52\xcd\x44\x1e\x14\x7a\xa3\x91\x80\xb7\x19\x63\xa2\x38\x59\x68\x82\x5f\xd9\x75\xde\xea\xec\xc1\xda\xb4\xb1\x75\xb1\xa0\xf5\x73\x98\xff\xe8\xa8\xdb\xae\x69\x97\x05\x1d\x51\x62\xcd\xbd\x55\x70\x2e\x6c\x04\x01\xe1\x13\x45\x8b\xbb\xd4\xf8\xa1\xd6\x21\xeb\x57\x11\x39\xc5\x6c\x52\xd6\xe6\x08\xd3\x58\x93\xf9\x18\x0d\x77\x8e\x15\x5f\xcf\x9c\x94\xc8\xa9\x5b\x27\x45\xdf\x64\xdb\x65\x4e\xa2\xf8
\xe0\x6c\x67\x53\xed\xad\x81\x49\xf0\xc0\x38\x40\x1f\x36\x14\x75\x37\x37\x9c\x6e\x51\xb7\x7c\x7f\x86\x31\xe2\x50\x34\x7d\x37\x5d\x14\x30\x27\xed\xfe\x74\xeb\xf9\x02\xae\x46\x07\x4d\x88\x45\x22\x83\x57\xd5\xfc\xbc\x13\x31\xe4\x44\x72\xeb\xe0\x8d\x4d\xa6\xa5\xda\x69\x86\xd4\x92\x94\x4d\xa9\x16\x0c\xce\xfa\xfb\x32\xfd\xb6\x16\x90\x1b\x3d\xea\xe5\x3a\x52\xbd\x99\x9d\xdc\xde\x5c\xf2\x7c\xfe\x28\x5a\x46\xd4\x46\xea\xc4\x2e\xbf\xb8\xc7\x48\x0b\x90\xe9\x65\xf2\xb6\x52\xa2\xe0\x1c\xa9\x77\x68\x8a\x0c\x8f\x97\xaa\x0d\xf9\x34\x9c\xad\x42\x8e\x70\xf2\xe2\xd5\x1c\xda\x90\xe3\x24\x4d\xc4\x27\x29\xdc\xd3\x80\x7d\x3e\x5e\x63\x88\xd2\x14\x77\x5b\xef\x62\x7e\x4b\x87\x44\x71\x85\x86\xc0\x60\x6c\x78\x94\xd0\xb2\xd7\xaf\x78\xcc\xb1\xab\x2a\x1d\x74\x6b\xd7\xed\x7e\xf5\xd4\xdd\x32\x93\xd2\x07\x3a\x1b\xa3\xde\x13\xbe\xaa\x85\xdc\x4f\xb0\x6d\x23\x2a\x2d\xa2\xf5\x59\xac\xff\xd4\xe4\x7a\x43\x46\x92\x72\x28\x95\x44\xf0\x55\x2d\x71\x81\xb9\xd0\xb0\xc8\xdb\xd8\xba\x5a\x0f\x77\x5a\x78\x5e\x2e\xe0\x09\x1b\x7b\x0f\x02\x47\x0d\x8e\x62\x83\x8d\xdc\x72\x94\xb7\xe5\x72\x3c\x5e\xd7\xc6\x17\x8a\x82\x43\xbd\xb7\xcb\xc9\x76\x68\x5f\x85\xae\x97\xdb\x37\xfa\xe6\x0f\x21\xee\x2e\xe1\x87\xbc\x3a\x15\x11\xe3\xe0\x3d\xb9\xd3\x4a\xa0\xda\x07\x48\x2a\x46\x52\x72\x28\x47\xa6\x42\x38\xfa\x52\x5e\x05\x1a\xb9\x8e\x48\x48\x27\xe1\x1e\x9d\x8d\xc4\xbd\xdc\x1f\x34\xbc\x23\x49\x13\xf2\x7d\xa9\x8f\xe5\x67\x4d\xef\x14\x80\x3a\x8a\x6b\xf2\x66\x9c\xa7\x8d\x5e\x49\xfd\x83\x4b\xc5\xd6\x97\xda\x85\x35\xc7\x91\x10\xda\xd7\xf6\x4e\x02\xf3\xc5\xfa\xb5\x3b\x62\x7d\x6a\x92\x43\x7d\x7b\x76\xf8\x39\x32\xf6\x78\xe8\x6b\x97\xcd\x3e\x3c\x7b\x36\x7d\xf9\x7a\xf6\x0c\x00\x3e\xc0\xc1\xa0\x0c\xdd\x8e\xbd\x7c\xfd\xbe\x65\xd7\x9d\xed\xa8\x8e\xfd\x45\x28\x75\x23\x94\x0a\x51\x86\xc4\xfe\xd5\xde\x6d\x66\xbb\x78\x77\xf0\x3a\xf4\x0f\xbd\x21\xa8\x07\xc7\xa6\xe8\xd8\x75\xe9\x00\x65\x07\x1d\x9a\x7d\xfb\xe7\xc3\xcf\x91\xb1\xc7\x43\x5f\xbb\x6c\xf6\x01\xfe\x26\x42\xb1\xf7\xa9\x63\x29\x4c\x87\x5e\x3e\x97\xde\x27\x27\xe2\xf2\xfd\x5c\x65\x68\x7f\xd9\xf7\xd2\x1c
\xa1\x9b\x0c\xcd\x3e\xc0\xe1\xe7\x03\x5c\x15\xdd\x08\x99\xb7\x43\x8f\xa7\x1c\x5b\x76\x38\xf4\xff\xe7\xcf\x57\x3a\xf2\xdb\xf5\xff\x7b\x6a\x6c\xee\xf6\x1d\xb9\x4d\x28\xfd\x42\x33\x19\x7a\x51\xca\xca\xf6\xfb\xab\xbd\xef\x75\x59\xe8\x68\xa2\xaa\x4f\xc7\x77\xef\xb1\xf9\xeb\xf1\xd1\xe7\x3d\x47\xf5\x71\x61\xf5\xb4\xfd\x2f\x0e\x3d\x61\x3f\xa9\xee\xb9\xa4\x2f\x45\x9f\xf1\xff\x67\xda\xd7\xc7\x30\x9a\x24\xc5\x2f\x6c\xbf\xdc\xc8\x4a\x71\xaa\xf6\x7f\xe7\xff\x97\xfc\xbf\x09\x9b\x03\x47\x6e\x6a\x9f\x3b\x19\x7a\xf5\x88\xff\x3f\xd0\xbb\xf4\x78\xd9\x2f\xc9\xff\x6d\xdd\x6d\x70\x18\xc7\x7e\xe7\xff\xe7\xed\xff\x0a\xfc\x9f\xd5\xae\xf3\x02\xf6\xda\x01\xed\x73\x76\xcd\xc0\x4d\xe9\x2f\x67\xff\x0b\x00\x00\xff\xff\x9b\xb5\x75\x45\x6b\x1b\x00\x00")
func complySoc2PoliciesAvailabilityMdBytes() ([]byte, error) {
return bindataRead(
@@ -537,7 +537,7 @@ func complySoc2PoliciesAvailabilityMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/availability.md", size: 164, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/availability.md", size: 7019, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -557,12 +557,12 @@ func complySoc2PoliciesChangeMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/change.md", size: 2793, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/change.md", size: 2793, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesClassificationMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x1c\x8b\x31\xaa\xc3\x30\x10\x05\xfb\x3d\xc5\x83\x5f\xeb\x13\xa7\x8a\xd5\xca\x4d\x52\x99\x38\x17\x58\x64\x39\x6c\x90\x76\xc1\x92\x03\xbe\x7d\xb0\xab\x81\x61\x46\xb9\x24\x8f\x81\x1b\x23\x64\xae\x55\x16\x89\xdc\xc4\x14\xa3\x65\x89\x3b\x71\x5c\x4d\xf7\xe2\x31\x84\x91\x2a\x37\xa9\x8b\xa4\xea\x09\x78\x4d\xe1\x00\xe0\x10\x42\xff\xdf\x53\xe1\x8f\xad\xcf\xf4\x95\x2a\xa6\x67\xe2\x30\x73\x4b\x1e\x8f\x4d\xd1\xe1\x7a\xe9\x6e\xe7\x10\xad\x94\xa4\xcd\xe3\xae\xd2\x84\x33\x66\x8b\xdb\x61\xc8\x39\x47\xf4\x87\x60\x45\xf4\x8d\xc9\x4c\x7f\x01\x00\x00\xff\xff\x86\x01\x62\xfa\xa1\x00\x00\x00")
var _complySoc2PoliciesClassificationMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5b\xcb\x8e\x1b\x37\x97\xde\x17\xfe\x87\x38\x40\x80\x19\xbb\xa3\xd6\xc4\x33\x9b\x49\x07\x59\x74\x9c\x18\xe9\x41\x3c\x31\xdc\x1d\xcc\x66\x36\x14\xeb\x94\x8a\x69\x16\x59\x21\x59\x92\x2b\xe8\x85\x5f\x63\x80\xe4\xe5\xfc\x24\x83\x73\xc8\xba\x49\x25\xb5\xd4\xb6\xff\x6c\xd2\x1b\xc3\xc5\xdb\xb9\x7c\xe7\x4a\xca\x88\x0a\xaf\xe0\x7b\x11\x04\xbc\xd4\xc2\x7b\x55\x28\x29\x82\xb2\x06\xde\x58\xad\x64\x9b\x09\xe9\xac\x69\xab\x2b\xf8\xfe\xe5\x9b\xcc\x8b\xa0\x7c\xa1\xd0\x5f\x65\x00\x77\xb7\x2f\xe9\x1f\x80\x4b\x78\xf9\xf2\xeb\xe5\xd7\x59\x25\x7e\xb5\xee\x2d\x6e\x94\x57\xd6\xf0\x94\x4b\xc8\x45\xc0\x2b\xf8\xaf\xc6\xc0\x0b\xf8\xf7\xaf\x5e\xfc\x27\x2f\x90\xb6\xaa\xd0\x84\x2b\xb8\x31\x2a\x28\xa1\x21\xb7\xb2\xa1\x2f\xd9\xe5\xe5\x65\xf6\x05\x5c\xd7\x35\x9a\x5c\x49\xf4\x90\x65\xe9\x3f\xef\xe0\xfa\x0a\x7e\x14\x26\xd7\xca\xac\xc1\x16\x3d\xb9\x98\xc3\x8d\x29\xac\xab\x98\xec\xd1\xfc\xef\xae\xe0\x95\x75\x15\x91\x67\x4d\xa1\x72\x34\x74\x96\x0a\x2d\xdc\x06\x11\x90\xcf\xcb\xbe\x80\x37\x8d\xab\xad\x47\x10\x26\x87\x5b\x69\x6b\xcc\x32\xb1\x84\xbb\x52\x79\x22\x5e\x80\x9c\x8a\xa5\x66\xb1\x40\x8e\x85\x32\xe8\x21\x94\x08\x0e\x7f\x6b\x94\xe3\x0d\x3d\x04\x0b\x68\x7c\xe3\x10\x42\x29\x02\xa8\x81\x34\xd8\xaa\x50\x2a\xc3\x4b\xac\x5b\x0b\xa3\x7e\x8f\xdf\x95\x87\xda\xd9\x80\x32\x60\x0e\x22\x80\x30\x20\xea\xda\xd9\xda\x29\x11\x10\x34\x6e\x50\x2f\x47\x54\x25\x61\xd1\x24\xad\x90\x8f\xa4\x3d\x89\x3f\x87\xe0\x89\x07\x12\xd0\xee\x39\x1f\xde\xff\x9f\x9f\xd0\xe3\x51\x36\x8e\xe4\x51\x3b\xbb\x76\xa2\x5a\xc2\x0d\x11\x2c\x75\x93\xa3\x07\xa1\x35\x84\xb6\x46\x4f\x7b\x8d\x96\x2d\xc0\xe1\x5a\xb8\x5c\xa3\x8f\x43\xc1\x03\x0d\x2e\xc0\x37\xb2\x04\xe1\xa1\x16\x35\x3a\xb0\x0e\x50\xa3\x0c\xce\x1a\x25\x7b\xa2\xfd\x22\x92\x1d\xa5\xe9\x59\xea\x24\xe6\x95\xf0\x48\x63\x26\x87\x7b\x63\xb7\x1a\xf3\x35\x51\x3f\xa1\x37\x0a\xd4\x83\xb1\x01\xb6\x4e\x85\x80\x66\x24\x96\xa4\x98\x91\x50\x88\x03\x65\x72\xb5\x51\x79\x23\x74\x3c\xcb\xb7\x3e\x60\xe5\xe3\x5e\xa5\x
d8\x20\x08\x29\x89\x93\x60\x27\x67\xdd\x63\x1d\x60\xd5\xee\x09\x71\x49\x90\xf9\x4e\xc8\xfb\xb5\xb3\x8d\xc9\xf7\x8e\x1f\xe3\xa2\x54\xeb\x32\x6a\x0f\xec\xea\x57\x94\x41\x6d\x30\x52\xa1\xaa\x5a\x33\x5e\x12\x00\x8c\x0f\xae\x91\x51\x22\x85\x75\xf3\xaa\x9b\x43\xa3\x97\x25\x56\x98\x48\xe8\x55\x37\x37\x93\xe9\x20\x09\x7b\xd8\xa2\xd6\xac\x27\x67\x25\xe6\x8d\xc3\xe1\xd0\xe9\xa2\x05\x68\xb1\x42\x36\x38\xa2\xba\x1c\x59\x1f\x1f\x71\x00\xd0\xcb\x3d\x83\xa3\xd5\xc6\x9a\xcb\x5c\x79\xa9\x2d\x5b\x87\x58\x3b\x4c\x26\x53\x09\x65\x82\x50\x06\xf3\x39\x89\x43\xd5\xf8\x00\x0e\x0b\x74\x68\x24\x99\x55\x2f\x6d\x56\xc6\xdb\x6e\xc4\xb3\x32\xde\x2a\x7f\x0f\xd7\xde\xa3\xf7\x6c\x24\xc9\x8d\xd1\xd0\x6d\x87\xf7\x1b\x23\x99\x3a\x78\x2d\x8c\x58\xe3\x64\xde\x17\xe3\x15\x37\x45\x2f\x11\xcc\x27\xf8\x50\x1e\x1c\x4a\x54\x1b\xcc\xa1\x70\xb6\x02\xdb\x04\xaf\x72\xdc\x23\x7f\xc1\x5f\x6a\x74\x9e\x3c\x40\x69\xbb\x65\x11\x22\xe3\x2d\x99\xcf\x74\x5c\x0b\x8a\x4c\x91\xc0\x69\x5d\x2e\x88\x6f\x12\x76\x74\x37\x8d\x46\x52\x1e\x7a\xe9\xd4\x8a\xe9\x9a\x08\x05\xee\x86\x03\x43\x89\x0e\x57\x2d\x6c\x95\xd6\xb0\x42\x69\xab\x44\xe1\xd6\x90\x89\x16\xbb\x44\x2c\x3f\x15\xdb\x03\x60\xc8\xa7\x91\x4f\x70\x81\xce\x5b\x35\x9e\xec\xc3\x83\xad\xd1\x75\x3e\x80\x2c\x43\x05\xb2\xda\x67\xb8\x5c\x2f\x17\x20\x1b\x1f\x6c\x85\x2e\xc2\x8c\xbc\xae\xb3\x1b\x95\x63\x0e\x52\xdb\x26\x07\x8f\x6e\x43\xe1\xe1\xf9\x62\x4f\x8a\xbb\x08\x1e\xe1\x7d\xc2\x38\x3b\xab\x89\x5b\x63\xf9\xaf\x10\x2a\x91\xe3\x21\xd9\xfb\x1a\x65\xbf\xb9\xef\x04\xe8\x90\xbe\x93\x79\x0f\x94\x27\x12\x07\x9c\xb3\x44\x2c\x29\x04\x34\xae\x85\x9e\x84\x8d\x28\xf7\xff\x29\xb1\x67\xa0\x25\x43\x9b\xd0\x47\x27\x25\x6f\x52\x80\xdc\xb1\x30\x8a\x0b\x18\xd0\x55\xc9\x8c\xae\x32\x8e\xb4\x2a\xa2\x61\x23\x74\x83\x33\xea\x5e\x00\xf9\xdd\x9c\x24\xac\xaa\x5a\xc8\xe0\x21\x6e\xca\x8a\xcf\x1b\x47\x44\x30\x87\x64\x55\x62\xb0\x2a\x76\x1d\xde\x2f\xe1\xb5\x75\x53\xf9\x5b\xb3\x3b\xd9\x47\xe2\x0a\xa6\x2c\xb9\x8b\x79\x2b\x85\x67\x83\x99\x3f\x13\xcf\x9f\x2f\x7b\x26\x6e\xd1\x78\xc5\x28\x89\xce\x44\x3a\x15\x94\x8c\xac\x
1f\x65\xab\xf3\xc3\x48\x3e\x84\x0e\x95\x42\xcb\x46\x0b\x0a\xb5\xe4\xf4\x50\x4c\x61\x00\x2a\x60\x75\x84\xf3\x81\xa4\x9f\x48\x89\x1c\x0c\x69\x3b\xeb\x12\x61\xd6\x04\x27\x64\x68\x04\x39\x7d\xad\xd6\x11\x29\x4b\xc8\xb2\xec\xcb\xcb\xfd\xbf\x99\x6f\x73\xd3\x4e\x1a\xcb\x1e\x2e\x2e\x76\x3c\xef\xc5\xc5\x03\x5c\x5c\xfc\x44\x5e\xfc\xe2\x02\xd2\xdf\x03\xc0\xc5\xc5\x34\xdb\xeb\x07\x79\xec\x9a\x43\xe2\xb0\xa0\x1b\xcb\x1e\xf8\xdf\x8b\x8b\x9f\x08\x86\x69\xfc\x01\xf6\xfe\x1e\x78\xd2\x4b\xa7\x02\x3a\x25\xa6\xfb\x10\x3d\x6f\xd1\x07\xa7\x62\xbc\x1b\x8f\x3e\x64\x5f\x7e\xbb\xff\x37\xf3\x6d\x6e\xda\x29\x63\xdf\x7e\x99\x3d\xc0\x9b\x66\xa5\x95\x9c\xd2\xf4\xca\xba\xe9\xf7\x07\x78\x2d\xee\x3b\x0c\x8c\xf1\xc1\x63\x37\x53\x97\x28\x36\x42\x69\xb1\xd2\x18\x59\xcf\x66\x44\x02\x0f\xf0\x16\x35\x0a\x8f\xe3\x4f\x75\x3c\x92\xbd\x33\x65\x35\xa5\x70\x55\x37\x96\xb2\xba\x38\x65\x39\xa7\x87\x7d\xa1\xef\x7d\xda\xcf\x35\xcd\xe1\xe9\xfb\x5b\x9d\x7e\x90\x30\x2d\x6c\x45\xbb\x3c\x69\xfa\x30\xf6\x4f\xb0\x89\x53\x39\x38\x42\xe5\x5f\xcd\xc1\x8d\x09\xe8\x8c\xd0\xf0\x4b\x07\x9f\x99\x6f\x0f\xf0\x8b\x11\x4d\x28\xad\x53\xbf\x53\xcc\x8d\x69\xed\x30\xfd\x20\x62\x4f\x17\x51\x25\x5a\x90\xa2\xf1\x08\x95\x32\x96\xa2\x73\x25\xd6\x08\x1d\x5c\x29\xdf\xc6\xaa\xd6\xb6\xc5\x94\xe7\xf6\x5b\x9d\x03\xa4\xfc\xdf\x38\xef\x97\xd6\x6c\xd0\x28\x0e\x06\xdd\xd8\xc0\x5e\x28\x95\xcb\x39\xab\x50\xe8\x97\x4f\x41\x6c\xb2\xaf\x69\xe2\x9a\xc6\xfe\x86\xe5\x29\x1c\x74\x6e\x1c\xf3\x81\xa4\xbd\x6f\xf3\xb0\x0c\x16\x3e\x1d\x2c\x27\x49\x74\x0f\xd1\x5e\xcb\xa2\xcf\xda\x80\x8a\xb6\x9a\x12\x86\xf3\xbc\xa8\xb4\x86\x92\x5c\xc7\x94\x25\xd0\x77\x1c\x4c\x01\x4f\x9c\x76\xac\x9e\xe9\xa6\xfb\xcc\x38\x59\xc0\x30\x36\x41\xfa\x44\xc9\x4f\x8e\x03\xff\xea\x8f\x4c\x7f\xca\x01\x0e\xeb\x26\x8c\x8d\xe8\xd8\xf4\xd9\xb1\xbf\x6d\xee\x14\x0e\xc6\xf9\x5d\x3a\x76\xe6\xd3\x53\x6d\x6e\xac\x8e\x4f\x62\x7e\xd6\xe8\x96\x0e\xee\x2d\x30\xf6\x63\x2e\xcf\x33\x3f\x11\x84\x0f\xce\xd6\xa5\x92\xbb\xe6\x17\x3b\x3b\xa9\xaa\xd8\x51\xc7\xc9\x07\xec\x9a\xde\xb0\xd7\xc3\x5c\x7c\x38\xff\x80\x1d\xd3\x
1b\x19\xcb\x5f\x0c\xa8\xec\x8e\x35\xff\x1f\x57\x13\x54\xec\xb6\x6f\x38\xdd\xf7\x59\xf6\x2f\x66\xe5\xeb\x6f\xb2\x2c\x5f\x4e\xa6\x77\x85\xf3\xa8\x67\xd0\xd7\x5f\xbb\x75\x6a\x6c\x43\x51\x35\x3e\xaa\x08\x13\x11\xcb\x2c\xc3\xe9\xce\xdc\x2b\x1b\x77\x2c\xb9\x7b\x17\x8b\x78\x0f\xbe\xb4\x8d\xce\x21\x38\x86\x18\x81\x8e\x4b\x64\xbb\xa5\x62\x6f\xf6\xe0\xd8\xf6\x8b\xfd\x59\xcf\x7d\xd6\x1c\x7f\x6b\xfa\x26\x2b\x45\x86\xd4\x89\xed\x2a\x6e\xee\x9e\x88\x8d\x55\x39\x15\x03\x8d\x31\x48\xb6\x24\x1c\x37\x4d\xf3\xd8\xb3\x03\x69\x3d\x97\xf0\xc5\x94\xfa\x91\x3c\x84\x87\x0f\xef\xff\x18\x82\xe3\x87\xf7\x7f\x82\x75\xf4\x6d\x2c\x6b\xfa\xda\x09\x53\x48\x69\xab\x5a\x18\x15\xbb\x62\x02\xb4\xf2\xdc\x41\x19\x19\x76\x6c\xf2\x30\xfc\xb7\xa5\x92\xe5\x5e\xb1\x12\xbb\x1d\xc9\xf8\x52\xcb\xc9\x88\x0a\x3d\x1d\xfe\xab\x5d\x41\xd1\x18\xd9\xb7\x32\xba\xed\xb6\xa5\x8d\xad\xd1\x58\xff\xae\xcb\xc0\x61\x34\x79\x91\x9d\xae\xf6\x32\xcb\xd6\x8f\xb0\x3d\x4e\x55\x9f\xca\x22\x3b\x12\x55\x4c\xfa\xb9\x07\x7b\x4f\x5c\x4f\xed\x34\x77\x69\x52\xd7\x84\x5e\x66\x59\x79\x0e\xd0\x52\x07\x72\xa3\x70\x1b\xbb\xa4\xb3\xd8\x8a\x7d\x08\x35\xed\x59\x0b\xef\x31\x78\xc0\x0d\xba\x16\x0a\xb5\x41\x68\x51\xb8\x94\x2c\x70\x57\x01\xb6\x25\x72\x57\xe8\xf0\xce\x09\xe8\x64\x62\xa5\x30\x6b\xcc\xb9\x55\xe4\x68\x53\xa8\xad\xf7\x6a\xa5\x71\x71\xc8\xd4\x86\xc5\x64\x19\x0e\xf3\xd8\x6b\xa2\x8a\x97\x9b\x69\x97\xd1\x56\xbd\x2d\xc2\x56\x38\xec\x7b\x6b\x43\xcf\x2d\xd8\xbe\xad\xe5\x17\x3b\xa2\x69\x4c\xde\x93\x4e\xba\x6c\xb9\x55\xcd\x5d\x10\xab\xa3\xe0\x84\xf6\xf6\x89\xd2\xeb\xba\xf5\xa2\x08\xb3\x2d\xb5\x28\x0e\x46\x73\x9c\x22\x7a\x4a\x99\x0e\xde\x89\x4b\xeb\xbe\xd3\x91\x44\x47\xc5\x8a\xdd\x46\xe0\xed\xed\xeb\xcf\x15\xe6\x9c\x37\xe4\xd6\x79\x8c\xbe\xd6\xb1\xf7\x48\x28\x2c\x2c\x1d\xad\xcc\x7a\xe8\xd1\xbd\xe1\x1b\x93\xfe\x8e\xe4\xea\x88\x9c\xb8\xcf\x9f\x13\x33\x43\x83\x2b\xd8\x9a\x01\xb5\xb2\x21\xd8\x8a\x44\xc9\x6d\xad\xfe\xa2\xa8\x16\x6b\xfc\x86\x3b\xca\x3e\x6a\x63\x6f\x87\xc2\x59\x13\xba\x4e\x9a\xb4\x9b\x74\x7f\x63\x36\xa8\x6d\x8d\x20\x85\x73\xdc\x90\x8c\x57\x
3c\xa3\x2b\xa8\xa1\xbb\xda\xed\xa4\xf8\x8a\xa0\xb0\x9a\x80\x31\xf1\x4c\xfd\x2a\xe5\xc1\x07\x4b\xd2\x83\x9b\x62\xbc\x5d\xba\xdc\x49\xa2\x5b\xf0\xdd\x52\x8e\x85\x68\x74\xd8\xbd\xcf\x50\x7e\x52\x02\x0f\x7d\xb9\x1f\x66\x2e\x9d\x3e\x87\x40\x3f\x07\xed\x37\xfb\xd8\x3f\x4a\xba\x39\x72\x99\xd6\x03\x71\x8f\xbb\x64\xc1\xc9\x37\x7a\xe9\x10\xa7\x8d\xf2\x6e\x1e\x8f\x90\x7f\x32\x90\x2b\x5f\x6b\x31\x60\x60\x12\x01\x88\xfa\x19\xf1\x57\x42\xe9\x93\x25\xaf\x3a\xf8\x38\x1f\x40\x2b\xd3\xb7\xab\x91\xb6\x81\x95\xcd\x5b\x96\xb8\x3a\x57\xd2\xfb\x01\xe8\x20\xc1\x04\x4a\x4a\x2e\x2b\xcc\x95\x80\x67\xb9\xf2\xf7\x7e\x01\x15\x56\xd6\x51\x5a\xeb\x72\xbf\x00\x0c\x72\xf9\xfc\x18\x57\x07\xc5\x4e\xa0\xf2\x8d\x2b\x84\xec\x79\xe3\x83\x3e\x0f\x5b\x63\x28\x05\x27\x8c\xaf\x54\x60\x62\x9c\xd0\xba\x3d\xc6\xc0\xe0\xe3\x08\xd7\xca\xf2\x85\x02\x16\xd6\x21\xa1\x40\x36\xde\xef\x5c\x4d\x74\xed\x72\xe2\xec\x32\xd8\x4b\xe6\x50\xda\xaa\x6a\x4c\x7f\x0d\xb3\x6a\x21\xa0\xc6\xba\xb4\x06\x17\xec\xaf\x4d\x9b\x6e\x43\x2a\x14\x31\xff\x20\xd2\xa6\xeb\x12\x4b\x62\x09\xd7\x5a\xf7\xb9\x40\x04\x2e\x9d\x78\xe0\x96\x8a\x55\x10\xdd\x2c\xf3\xb9\x6e\x54\x8e\x9a\xef\x65\x29\xc5\x88\x68\x1b\xde\x15\x2c\x48\x98\x8f\x3f\x2d\x58\x7e\x78\xff\x67\xf6\x34\x5a\x28\x36\x6a\x0c\xf1\x91\x81\x6f\x56\x95\x0a\x20\x0e\x3f\x4b\x48\x91\x82\x82\x61\xc5\x10\x09\x08\xbe\xa9\x29\x4c\x79\xcb\x9e\x39\x05\x5b\xa8\xad\x32\xe1\xd2\x16\x97\x14\x71\x85\x0c\x4b\xb8\x06\x2f\xe8\xb0\x23\xbb\xb3\xdd\x0d\xfc\x7f\xd7\xc5\xb0\x78\x29\x4a\xe5\x49\xbc\x1f\x49\xf1\x4a\x55\xb5\xb3\x14\x9f\xc6\xf7\xbf\xc7\xb8\x5d\x21\x55\x38\xd6\x25\x41\xcf\xdd\xa3\x9d\x70\x15\x3b\xb9\x0c\x5a\x3d\x7f\x1e\xb1\xf0\x8f\xec\x7f\xc9\xf9\xae\x1c\x8a\x7b\xc8\x86\xb7\x22\x27\x3f\x0f\x79\x3c\xeb\x1b\x3c\x67\x7f\x7f\x79\x30\x84\x8f\xa0\x75\x71\xc5\x62\x8c\xa1\xfc\xfb\x2e\xf2\xec\xd8\xe4\x60\xb0\xd1\xe7\xd3\xdf\x8b\x25\xfc\x4c\xd9\xed\x4c\xd6\x4b\xe5\xf4\x28\x8f\x5d\xf6\x6b\xc6\x8b\x6f\x0a\xf0\x24\xb6\xe3\x37\xd0\x7d\x9c\xea\x98\xf3\x29\x7e\x3b\x5c\x93\x4d\x38\xcc\xd9\x61\x2f\x27\x84\xf5\x7c\x30\x29\x
9c\x84\xaf\x30\x3e\x8e\x50\x06\x9c\xb5\x95\x67\xa5\xda\x26\x74\x97\x19\x1d\xa9\x07\xb6\xe9\x01\x52\xd9\x0d\xe6\x80\xef\x6a\xcc\x55\x50\xb6\xf1\xba\x8d\x57\xca\xb5\x53\x24\xa7\x18\xce\x0a\xf1\x0e\x2a\x21\x4b\x92\xf0\x10\x27\x87\x42\x6a\x72\xcc\xdd\x2c\x9f\x9c\x69\x30\x10\x41\x5b\x79\x8f\x39\x48\xb1\x52\x06\xc3\x31\x56\x57\x18\xbd\x66\x81\x8e\x56\xa7\x97\x0e\x7c\x85\x7b\xa8\xea\x88\x25\x0a\x1d\x23\xb5\xa5\x94\xba\x4b\x9f\xa6\xe7\x3c\x51\x5d\xa4\x9b\x44\x08\x08\x70\x18\x1a\x67\xe2\x5d\x7c\x1d\xba\x0c\xf6\xa8\xd0\x7b\x57\x12\x75\xd8\x29\xe0\x24\x91\x8f\x61\x3a\xa1\x2e\x16\x98\xdc\xf5\xb1\x75\xbb\x5b\x64\x9d\xb8\x34\x27\x6d\xda\xd9\xd5\x6a\xfa\x96\x24\xdb\x47\xff\xa3\x3a\xf7\xa2\xd8\x11\xcc\x74\xc9\x47\x28\x9b\x8b\xd6\xe0\x1a\x1f\xb6\xd6\x85\xb2\xed\x5e\x5d\x8c\x41\xc0\x1e\x1f\x85\x3e\x88\x87\x57\xe2\x5d\x77\xcf\xb8\x9b\x4b\xd6\xe8\x62\xd4\x7e\x9c\xfe\xa8\xc1\xbc\xaf\x93\x69\xbb\x3d\x87\x12\x13\x6f\x61\xd8\x95\x19\x7c\x17\xfa\x4b\xc6\xa8\xff\x18\x08\x46\x09\xd1\xe0\xc6\x3e\x9d\x0b\x1b\x2f\xe2\x07\x0f\x7d\x96\x0e\x54\x7e\xe2\xbb\x54\xe3\xc2\x46\x09\x68\x0c\x1a\xe9\xda\x9a\xaf\xeb\x95\x46\xf0\xa5\xe0\x5c\xa3\xaf\x52\xbb\x47\x67\xaf\xee\xde\xb0\xe5\xb4\x3d\x08\x6a\xe1\xfd\xd6\xba\x7c\x78\x5c\x37\x3d\xfc\x7a\xd2\x1b\x98\xa9\xfe\xb7\x5c\x28\xce\x57\x2e\xc3\x29\xfd\xcb\x3d\x46\x03\x21\xd9\xac\xfb\xb3\xf7\x15\x97\x92\x6a\x7b\xa8\x30\x4a\x89\xf6\xe8\x04\xd1\x04\x4b\x84\x49\x4a\xdc\x3a\x17\x16\xcb\xdd\x17\x5f\x41\xa5\x4c\x13\xba\x47\x7a\xe9\x25\x4d\xfb\xa8\xa7\x64\x6d\xf5\xfd\x1e\xf6\x29\x49\x75\x51\x02\xf1\x3d\xd8\xf8\xb1\x21\xe9\xb1\x6f\x01\x61\xff\x9e\xe7\x11\xd1\x8d\x77\x18\xaa\xbe\xf3\x30\x30\xaf\x77\x5b\x70\x0a\x19\xda\x1a\x77\xf4\xde\x23\xe6\x74\x0f\x84\x4e\x78\x3c\xc5\xff\x9c\xe1\x79\x46\xc0\xb5\xae\x7a\xdc\x82\xd3\x4a\x36\x60\x7e\x9c\xe9\x36\xe4\x91\x23\x4a\x48\x2a\xa9\xa9\xa2\xe7\x9f\xc9\x3d\x72\x40\x17\xba\x49\x8e\x47\xe5\xca\xfd\x3d\x56\x42\x4f\x7f\x67\x62\x3f\xde\xdd\xbd\xb9\x8d\xef\x64\x6f\x7f\x5c\xc2\xab\xc6\x51\xfe\x7e\x20\x5e\x0d\xcb\x69\xc1\x60\x25\x29\x80\x11\x
2a\x47\x76\x12\x8b\xcc\x91\x13\xde\xef\xab\xdc\xc6\xe4\xec\x33\x79\xa2\x47\x9d\xc1\x47\xda\xfb\xc7\x1b\xf3\x68\xcf\x39\xf2\x22\x88\x58\xcd\xda\xf6\x25\xf5\x90\xa1\x8d\xe1\x53\x97\xad\x27\x0a\xa6\x92\x38\xec\x2d\x7e\xf1\x7d\x17\x54\xdb\x35\x05\xc5\x23\xa6\x1f\x83\x4f\x12\x75\xc0\xaa\xb6\x4e\x38\x45\xa0\x76\x1c\xd0\x84\x41\x13\x88\x4a\x2c\x02\xef\xb1\xb5\xee\xbe\xd6\x62\x2f\x7d\x11\x41\x0c\x68\x72\xe9\x42\x41\xb7\x09\x41\x06\x84\x5e\x5b\xa7\x42\x59\x4d\xbb\xfa\xfc\xaa\x19\x21\x47\x8d\x23\xbb\x38\x66\xc8\x27\xeb\x7e\x24\xc2\x50\x3a\xdb\xac\x4b\xa8\x1a\x1d\x14\x55\xbc\x81\x6a\xda\x26\x94\xb4\x7f\xaa\xd2\x9f\xbd\x7e\x75\xfd\xfc\x64\xed\x75\x16\xaa\x8c\x0f\x82\xcf\x18\x79\x81\x73\x6d\xff\x91\x33\xce\x45\x48\xac\x94\x58\x76\xa1\xed\xbb\xbb\x7c\x65\x60\xa9\xd6\x1c\xaa\x60\xbe\x32\xb0\xb6\xda\x4b\x25\x5e\x0b\xa5\x3f\x93\xed\xb2\x99\x21\x37\xa2\x63\xb5\x2d\x1c\x16\x0d\xd9\x97\x2c\x51\xde\xa7\x27\x9a\x52\xd5\x6a\x2f\x2d\xa5\x52\x3e\xbe\xa7\xf5\x81\x25\x12\xfb\xd9\x1f\xde\xff\x31\x53\x17\x7e\x78\xff\x27\xf7\xdb\x1e\x0f\xaf\x3f\x70\xcf\x6a\xdf\x15\xaa\x47\x72\xff\x13\xc0\x7a\x60\xeb\x3d\x79\xdf\xa6\x5e\xd6\x6b\x4a\xfa\x3f\x93\xe0\x79\x6f\xb2\x6b\x0a\x25\xfe\xe4\xc4\xeb\xb4\x0a\x88\xaa\x95\xa6\x3a\xaf\x5c\xbd\x1b\xad\x3b\x5a\xaf\x7e\x8c\x37\x8c\x5c\x73\xa5\x34\x61\xfb\x40\xde\x11\xa7\x9f\x5b\x8d\x3e\x49\x46\xe7\xd7\x88\x7d\x52\x94\x76\xda\x4d\x89\xac\x9b\x54\x67\x71\xd6\x09\x28\x3d\xcc\xf4\x7e\x39\x96\xe6\x7e\x86\x3a\x2c\xf6\x77\x1e\xab\xc5\x76\xf2\x8c\xbb\x51\xb3\xf4\x67\x6e\x96\x7e\x32\xeb\xd9\xf9\x91\xc9\x54\x0c\x93\x27\x1b\xfd\x06\x24\x41\x2a\x05\x39\xf3\x40\x86\x44\xea\x94\x13\x98\x53\xda\xb4\xe3\xf0\x95\x1f\x75\x51\x7b\x38\x1e\x6f\x9b\xf0\x6e\xbd\xbe\x6c\x63\xf2\xcb\xda\x59\x5b\xec\xdb\x17\xbf\xcb\x73\x7e\xd4\xef\x4b\xf4\x39\x94\xd6\xe5\xe3\xf3\x0e\xc2\xe3\xe5\x78\x0f\x69\x4d\xde\x70\x46\xd5\x05\xd5\xd1\xef\x86\x62\x73\xf8\x11\x13\xfb\x6f\x1b\x71\x23\x1d\x01\xbd\xbf\xc5\x1a\xd3\x29\x7a\x47\xb0\xcc\xb2\x1b\x33\x2d\x4d\x16\x9d\x3b\x88\xe5\x47\xff\xeb\x1c\x32\xce\xa6\x6a\xb4\x
08\x6a\x83\xba\x5d\x30\x35\x31\xca\x89\x30\xac\x29\x52\x3b\xfb\xc0\x2d\x47\x55\x27\x1b\xdb\xf9\xd5\x4f\x7c\xc5\x1f\xb7\xe8\x1e\x43\xd0\x56\x7c\xb3\x78\xe8\x02\xf2\x92\x63\x48\xf7\xc4\x6e\x42\xf6\xe8\x17\x21\x4c\x51\x7c\xf7\x7e\x60\x27\xf6\x1f\xc9\x7c\x38\xbf\x3f\x24\x81\x65\xf6\x8f\x2c\xcb\xb2\xff\x0f\x00\x00\xff\xff\x20\x25\x48\x59\x28\x38\x00\x00")
func complySoc2PoliciesClassificationMdBytes() ([]byte, error) {
return bindataRead(
@@ -577,7 +577,7 @@ func complySoc2PoliciesClassificationMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/classification.md", size: 161, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/classification.md", size: 14376, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -597,7 +597,7 @@ func complySoc2PoliciesConductMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/conduct.md", size: 4492, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/conduct.md", size: 4492, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -617,7 +617,7 @@ func complySoc2PoliciesConfidentialityMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/confidentiality.md", size: 3653, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/confidentiality.md", size: 3653, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -637,7 +637,7 @@ func complySoc2PoliciesContinuityMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/continuity.md", size: 5043, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/continuity.md", size: 5043, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -657,7 +657,7 @@ func complySoc2PoliciesCyberMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/cyber.md", size: 4805, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/cyber.md", size: 4805, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -677,12 +677,12 @@ func complySoc2PoliciesDatacenterMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/datacenter.md", size: 3014, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/datacenter.md", size: 3014, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesDevelopmentMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x1c\x8d\x31\x8a\xc3\x30\x10\x45\x7b\x9d\xe2\xc3\xd6\x5a\xd6\x5b\x19\xb5\x72\xb3\x8b\x0b\x13\xe5\x02\x42\x1e\x87\x09\xd2\x4c\xb0\x64\x07\xdf\x3e\xd8\xd5\x87\xcf\x7b\x3c\x89\x85\x1c\x82\x2e\xed\x1d\x57\xc2\x40\x3b\x65\x7d\x15\x92\x86\x91\x17\x4a\x47\xca\x84\x49\x33\xa7\xc3\xc4\xb4\xaa\x1c\xc5\x21\x0c\xa3\x9f\x4c\x8d\x8d\xeb\xc2\x54\x9d\x01\xee\xc1\x9f\x03\x58\x78\xdf\x7f\x77\xa6\xc4\xa7\xae\x37\xda\xb9\xb2\xca\x85\x58\xcc\xb1\x91\xc3\xff\x26\xe8\xf0\xfb\xd3\xf5\x97\x90\xb4\x9c\x39\x87\x3f\xe1\xc6\x31\x63\xd6\xb4\x9d\x8f\xb1\xd6\x1a\xf3\x05\xaf\x85\xe5\x81\xa0\x2a\x9f\x00\x00\x00\xff\xff\x38\xd7\x26\x45\xae\x00\x00\x00")
var _complySoc2PoliciesDevelopmentMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\xcf\x92\xdc\xb6\xf1\xbe\xef\x53\x74\xd5\xef\xb2\x5b\x35\x33\x6b\xfd\x4e\x8e\x72\x92\x77\x9d\x8a\x52\x76\x65\x4b\x92\x93\x33\x06\x68\x92\xf0\x82\x00\x0d\x80\x33\xa6\x4e\x7a\x8d\x54\xc5\x2f\xa7\x27\x49\x75\x03\x20\x41\xce\xac\x2d\x57\x7c\x88\x2f\x9a\x9d\x01\x81\x46\xf7\xf7\x7d\xfd\x87\xb6\xa2\xc7\xd7\xf0\xde\x35\xf1\x2c\x3c\xc2\x23\x9e\xd0\xb8\xa1\x47\x1b\xe1\x3b\xdd\xa0\x9c\xa4\x41\x78\x72\x46\xcb\xe9\x46\x48\xef\xec\xd4\xbf\x86\xf7\x8f\xdf\x3d\x3c\xdd\x04\x11\x75\x68\x34\x86\xd7\x37\x00\x1f\xde\x3f\xd0\x3f\x00\x7b\x78\x78\xf8\xfa\xf0\xea\xa6\x17\x3f\x3a\xff\x0e\x4f\x3a\x68\x67\x79\xc9\x1e\x94\x88\xf8\x1a\xfe\x36\x5a\x78\x05\xff\xff\xd5\xab\xaf\xf9\x01\xe9\x7a\x3a\xee\x35\xbc\xb5\x3a\x6a\x61\x40\x39\x39\xd2\x37\x37\xfb\xfd\xfe\xe6\xe6\xff\xe0\x69\xf4\x83\x0b\x08\xc2\x2a\x78\x2f\xdd\x80\x37\x37\xe2\x00\x1f\x3a\x84\x21\xff\xe2\x1a\x88\x9d\x0e\x30\xb0\x9d\xa0\x03\x44\x07\x0a\x1b\x6d\x11\x3c\xfe\x34\x6a\x8f\xb4\x61\x80\xc6\x79\xc0\x10\xc5\xd1\xe8\xd0\x69\xdb\xf2\x9e\xbd\xd0\x36\x0a\x6d\xe9\xef\xa3\x08\x68\xe8\xb1\xc1\xbb\x88\x32\x6a\x67\x21\x44\x61\x95\xf0\x2a\x3d\x2e\x5d\x3f\x08\x3b\x41\xc8\x2e\xdb\x81\xc5\x78\x76\xfe\x19\x14\x9e\xb4\xc4\xb0\x83\x80\xfe\x84\x3e\xec\x78\x77\x85\xe1\x39\xba\x21\x1c\xb2\xd5\x8b\x99\x62\x18\x8c\x46\xb6\x55\x18\x03\x63\x40\x1f\x60\x40\xdf\x38\xdf\x93\x2d\xe5\x04\xda\xb8\x04\x65\x07\x61\x0a\x11\x7b\x10\xaa\xd7\x56\x87\xe8\x05\xd9\xb8\xcb\x17\xb1\xa2\xe5\x9b\x26\x8f\x20\x39\x4d\x46\x7d\xd2\x91\xce\x39\xeb\xd8\x69\x4b\xdf\x83\xf3\xad\xb0\xfa\x23\x3f\x9b\x8d\x8a\xd3\xa0\xa5\x30\x66\x02\x6d\xa5\x19\x15\x06\xc0\x7e\x30\x6e\x42\x0c\xbc\xbb\x74\x36\x7a\x21\xa3\xe3\x9b\x05\x38\xa3\x31\xf4\x2f\x39\xc3\xa3\xc1\x93\xb0\x11\xf0\xe7\x88\xde\x0a\x03\x83\xf0\x7c\xa8\xb6\x27\x67\x4e\xa8\x20\x9d\xbc\xb6\xe8\xb6\x43\x8f\xda\x8a\x26\xa2\x07\x8f\x0d\x7a\x8f\x8a\xfd\x11\xe0\xf3\xa7\x7f\xb3\x47\x3e\x7f\xfa\xe5\x6e\xed\xb7\x7e\x0c\x11\x8e\x08\xbd\x50\x14\x5e\xa1\xb4\x99\
x40\x9c\x84\x36\xe2\x68\x70\xe5\xce\x2b\x4e\x37\xc1\xd5\x9e\x47\x1b\xd1\x0f\x5e\x07\xdc\x9f\xb5\xc2\xec\xde\x74\x63\x5e\x26\xd9\x49\xa1\x04\x01\x15\x1c\xa7\x0b\x1f\x82\xf3\xe0\x2c\x1c\xb1\x13\xa6\xc9\xce\x5f\x2f\x20\xe8\x0c\xde\xa9\x31\x61\x4a\xf7\x83\xe1\x48\xa5\x10\x10\xcc\xbf\x11\xf2\xb9\xf5\x6e\xb4\x6a\xc6\xb7\xb6\x71\x0e\xe6\x16\xde\x68\xc3\xe8\x11\x04\xc7\x61\x9f\xc0\xae\x08\x7b\x92\xbf\x4e\x01\x0b\x3a\xf0\x0e\x83\x77\x12\x43\x02\x30\xa3\x84\xf0\x45\x36\xa2\x8d\xda\x23\x98\x99\xea\xae\x59\x70\x47\x9b\x68\x4b\x70\x4c\x77\xc8\xbe\xd9\x41\xe3\x5d\x0f\x3a\xd3\x75\x45\x30\x61\x85\x99\x82\x0e\x30\xda\xa8\x4d\x01\xab\x42\x22\xb9\x0e\x21\xc3\x0d\xcb\x55\x92\xd9\x81\x4d\x61\x1b\xd5\x48\x8c\xf2\xce\x60\xa6\x8f\xc7\x30\xd0\x3d\x8e\xda\x30\x66\x76\x89\xc3\x42\x76\xc4\xcb\x16\x8b\xb3\xaf\x91\x65\xb9\x56\xc2\xc1\x3f\x0b\x01\x66\x5f\xee\xbe\xe0\xd9\xe2\xc7\x40\x47\x5d\xbd\xec\x0e\x84\x97\x9d\x26\xbd\x28\xbe\x57\x18\x74\x6b\x77\x6b\xea\x46\x0c\x51\xdb\x96\xbe\x25\x62\xd1\x97\xf7\x6b\x20\xec\xc0\x0d\x98\x28\x1d\xee\x59\x97\xd0\x0a\x2b\xb1\x48\xc9\xc6\x8d\x01\x4b\x64\x31\x40\x2f\x26\x62\x45\xe3\x8c\x71\xe7\x44\x38\xa2\x26\x85\xef\xcf\xfc\x07\x9c\x45\x44\xdf\x10\x39\x7a\xa7\xd0\xec\x40\xc7\xf2\x94\x18\x06\xef\x06\xaf\x45\x64\x02\xa5\x4d\x96\xb0\x84\x00\xa4\x8a\xc2\x9b\x69\x07\xe7\x4e\x1b\x4c\xdb\x83\x68\xe9\x73\xed\xb7\xbc\x75\xfd\xa8\x14\x44\x0d\xf0\x38\xa0\x88\xc5\x32\xd0\x91\x6f\x7a\x42\x68\x44\xe8\x0a\x0b\xde\x91\x0a\xa0\x95\x18\x38\x64\xef\x74\x78\x86\x37\x74\xbf\xc0\xbb\xe7\x4c\x44\x59\x21\x7f\xca\x54\xa9\xc9\xf6\xf9\xd3\xbf\xc2\xcb\x29\x0d\x1e\x38\xaa\xb7\x94\xc4\xee\x16\xb1\x23\x83\xd3\xb5\x89\x1b\x43\x27\x02\x25\x36\xce\x50\xfa\x00\xef\xea\xb8\xbf\xc9\x71\x9f\x7f\x7d\xb3\x0d\xff\x23\x87\x7f\xfe\xfd\x43\x0a\xfc\xfc\xf7\xe3\x12\xff\xb7\xab\xf8\xcf\x2b\xfe\xbe\xc0\xe0\xfb\x05\x06\xd5\x06\x0b\x12\xd8\x05\x8f\xa3\xe7\x74\x66\x4c\x36\xbd\xf0\x82\x6e\x09\x67\xd2\x59\x10\x85\x8e\x3a\x80\x75\x91\xc2\xb0\x28\x52\x66\x42\x5a\xc0\x0a\x4b\x4b\x3a\x71\x22\x71\x38\x21\xa5\x6e\x01\x01\
x23\x39\x4a\x44\x4e\x06\x42\xdb\x95\x3c\x68\x45\x6a\xd2\x4c\x6c\x88\x8c\x23\x65\x01\x74\x03\x89\x0a\xa5\x4d\x3f\x38\x4f\xf0\xa2\x45\x89\xc9\x79\x51\xa3\xe9\x72\x9c\xf9\xf9\x90\x51\x76\x9c\x57\xa4\x74\xa3\x8d\x60\xc7\xfe\xc8\xd9\x94\xa5\x4d\xc7\x09\xa4\x53\xf4\xb8\x77\x23\x79\xb5\xb6\x61\x47\x47\x11\xea\x5d\xec\xd0\x2f\x3b\x53\x56\xab\xac\xa3\x73\x0e\xf0\xb6\xb2\x9d\x2f\x75\x76\xa3\x51\x84\x54\x66\xbc\x42\x4a\x46\x01\x6d\xd0\x8c\xd2\xe4\x14\x3c\xa1\xa7\x25\x63\x40\x05\x6e\x8c\xb4\x8e\x7c\x5d\x69\x3b\xda\x93\xf6\xce\x32\x56\x0e\x33\x40\x17\x6c\x55\x09\xb0\xa4\x32\x2a\x2a\x0c\x12\x37\x84\x55\xf7\xec\xae\xd9\x02\x95\x42\x4b\xe1\xb9\x2e\xb5\x1c\xf0\x05\xaa\x8c\xce\x8f\x08\xc7\x31\x90\xb6\x86\xd5\x53\x87\x79\xd9\x53\x2a\x34\x40\x80\x27\x8e\x89\x99\x63\x07\xf8\xde\x79\x5c\x45\xd6\xd9\xed\xa2\x40\x20\x52\x3a\xc8\x31\x84\x39\xb9\xbf\xc0\x56\xb8\xf5\x85\xd4\x70\x2b\xee\xee\x16\x1b\x1e\xd3\x06\x20\xc2\x80\x32\x09\xec\x1c\xe4\x5b\x3c\xb4\x87\x1d\x39\xa2\x49\xa1\x13\x46\xc7\x69\xc7\x39\xb1\xf5\xfc\x31\xe7\x7c\xca\x0c\xd3\x1d\xf3\xae\x4b\xa2\x35\x41\xaf\xdb\x2e\x72\x02\x9f\x48\xcf\x58\xf2\x2b\x3f\x1c\x2a\x62\x9f\x34\x9e\xc1\x63\x3b\x1a\x11\x9d\x9f\xb6\x4e\x56\x17\xb9\x9c\xe5\x85\xf3\x07\xa3\x78\x2e\x0f\x77\x4b\x0a\x4b\x0f\xb6\xa3\x56\x5c\x4d\x86\x8b\xf3\x9a\x91\xb5\x62\x8e\x51\xeb\x84\xb9\x5c\x25\x47\xef\xc9\x8f\xf3\xb2\x6d\x46\x8e\x28\x3b\xeb\x8c\x6b\xa7\x2a\x71\x2c\xdb\xbc\xb5\x0b\xed\x06\xef\x5a\x2f\xfa\xba\x48\xd4\x29\x9b\x27\x09\xd4\xb6\xcd\x18\xa2\xff\x5e\x1d\x66\x95\xa3\xa8\x14\x43\xb2\x3e\x70\x6d\x75\x2f\xc7\x10\x5d\x9f\xaa\xac\xea\xb9\x1f\xac\x42\xcf\x4e\x81\xb2\x62\x4f\xd5\xa0\x45\xcf\xc1\xf3\x8d\x90\x1b\x28\xe7\x58\x97\x6b\xee\x0d\x49\xf6\x5c\x58\xdf\xad\xf7\x2f\xa0\x19\xbc\xfb\x11\x65\x84\xa8\x7b\x6c\xbc\xe8\xb1\xc2\x55\x92\x7c\x76\xd7\xe0\xb5\xf3\x3a\xea\x8f\xb8\x60\x2b\x38\x33\xb2\xff\xae\x53\x23\x21\x18\xa4\x0b\x91\xb7\x38\x8e\xaa\x45\xd6\x3c\xaa\xb7\x75\x69\x24\x2e\xb6\xab\x7d\xb9\xca\x89\xb4\xc9\xb5\xf8\xbc\xa1\xac\x7b\xaa\x0c\xbb\xc0\x5e\x3a\x7a\x79\xe2\x7b\xf1\
x8c\x54\x1a\x1f\xc7\x09\x4e\xe1\x00\xc7\x51\x1b\xf5\xf9\xd3\x2f\x54\x1b\xa4\x4e\x6b\x63\x1a\x7a\x6e\x4a\xb8\xb1\x51\xe0\x12\x4f\x6b\x08\x89\xa3\x3b\xe1\x35\x99\xba\xd0\xa6\xfb\xeb\xa2\xf4\x42\xed\xb3\x95\xa5\x6f\xd5\x28\x09\x87\xb5\x5f\x22\x8a\x3e\x90\x51\xcc\x5b\x07\xd2\x53\x81\x40\x89\x2a\x55\xb1\x09\x6e\x57\xe3\x4a\x22\xe9\x53\x87\xa7\x6d\xe3\x45\x88\x7e\x4c\x26\xcc\x77\xaf\x0d\x5b\xf6\xf8\x4e\x87\x98\x78\x43\xed\x0e\x1b\x6c\x9d\xdd\x2f\xdf\x54\x69\xc6\x46\xef\xcc\x35\xcd\xac\xaf\x7c\x16\xe6\x39\x76\xde\x8d\x6d\xb7\xac\x7c\x98\x6f\xc2\x57\x48\x88\x5e\xb6\x4e\x2e\xba\x58\xde\xe9\xb6\xcb\x4b\xd7\x36\x25\xde\x93\xf0\x71\x01\x75\xc5\xd8\xb4\xe3\x55\x7d\x27\x1c\xdf\x1f\xd1\x62\xa3\xe3\x92\x35\x08\x26\x39\x52\x14\x61\x67\xd7\x1c\x78\xcc\x4d\x38\x87\x58\x61\x14\xda\xfc\xda\xc1\xd7\xce\xcd\xbb\x7b\xd6\x32\x2e\x1b\x65\x97\x50\x95\x8b\xae\x1d\x88\x08\x02\xa8\x91\xed\xc7\x7e\x57\xed\x9e\x9e\x61\xe9\xa9\x9a\xb0\x22\x7f\x55\xb4\xd7\x9d\x68\x7e\x8e\x1e\xab\x5c\x39\x97\xc8\x35\x90\x82\xf4\xfa\x58\xdd\x6c\xbe\xcf\x52\x50\x27\xf5\x28\x8a\x5e\xeb\x24\x04\x6c\x3d\xb6\x39\x39\x36\xa0\x46\xce\xe6\xf4\xc0\xf6\x97\x2b\xf5\x7f\xd9\xf8\x6a\xbd\x00\xb5\x8d\xe4\xbe\xd2\x66\xa1\x55\x7b\xd2\x5d\x60\x09\x2a\xdb\x08\x2a\x71\x39\x35\x64\x7d\x5f\x5f\x92\x36\x10\xd0\xa2\x45\x5f\xc7\x8c\x0c\x81\xc1\x88\x2a\x6c\x3f\x0c\x8a\x6b\xff\x2f\xce\x75\x17\xde\x69\xea\x26\xe2\x42\x4c\x59\x18\x0a\xa6\x32\xdd\x7b\x1d\x75\x4b\xa7\x7a\x0c\x5a\x51\x29\xf8\xeb\xc1\x86\xd3\x68\xe8\x26\xa5\x17\xbc\xbc\xaa\x55\xcb\x78\x07\x02\x0e\xc2\x6f\x25\x87\x13\x3a\x5d\xff\xb7\xaa\xb4\xdf\x59\x9a\xd5\x67\x6c\x94\xef\x3d\x46\x18\x87\x45\xd5\xea\xa5\x95\x15\x25\x0b\xce\xd3\xa3\x10\x9d\x17\x2d\x56\xf5\xd2\x07\x9f\x6b\xee\xda\x29\xb3\x88\x6a\x1b\xa2\x30\x66\xf1\x1e\xd7\x4e\xed\xe8\x67\x34\x66\xf7\x1e\x0d\x56\xe3\x2b\x3d\x77\xb5\xea\x52\x69\xa9\xd4\x66\xa9\xa8\x22\xb3\x15\xb3\x6b\xf2\xf1\x36\xd9\xb2\x9b\x6d\xc0\xb5\x86\xb1\xcf\x38\x0e\xeb\xdb\x1c\xb6\x5e\x2b\xa7\xec\x3d\x1a\xd6\xbe\x1a\x03\x13\x91\x41\x3e\x73\xc3\x76\x8d\xe4\x39\x5b\
x5c\x61\xf9\xcc\x80\x34\xd3\xcb\xe5\x0d\x99\x98\x2b\x33\x0a\x01\xa7\xd3\x5b\x7d\xc0\xc3\x8e\x0a\x44\x8f\xdc\x6f\x15\x1a\x57\x71\x79\x70\x96\xc8\x0c\xa3\xd5\x71\xc5\xf2\x72\xdf\xea\xb1\x3f\x00\x6b\xe5\x84\x0d\xce\x6a\xc5\x57\x78\x05\x01\x59\x1d\x73\xb2\x82\xa3\x8b\x3c\x44\x89\x5a\x26\x7a\x4e\x56\xf4\xfc\xb9\x2a\xfc\x68\xa7\xe8\xe6\xee\xe9\x82\x82\xb0\x6a\x72\x37\x07\x2e\xfa\x70\x99\x1f\x72\x29\x49\x77\x09\x97\x9e\xcc\x63\x50\x6a\x7a\xd9\x36\xe3\x44\xe2\x6d\x1a\x63\x5e\x26\x69\x40\x4b\xb8\x56\x97\x07\x8d\x61\x06\x4b\xf6\xdb\x26\xab\x5c\x4b\xf8\x5b\x7b\xb4\x55\x38\xa0\x25\x2f\x6c\x10\x58\xf7\x44\x65\x16\xc5\x57\xab\x8b\xc1\x5c\x74\xd5\xb4\xdd\x0c\x18\xff\x10\x09\x2a\x53\x85\x2d\x32\x66\xaf\x6a\xe3\x62\xbd\x2e\x1b\x7c\x91\x53\x37\x32\x9c\xba\xe9\x79\xc8\x7b\x8d\xf0\xe5\x88\xe8\x05\x37\xcc\x3c\x0d\x8d\x67\x44\x9b\x4f\x4d\xdc\x32\x66\x1f\xa4\x30\xb5\xb1\x97\x11\x9b\x5b\x3c\x90\x1d\x26\x7a\xcf\xd3\x47\x68\xb4\xc1\xd5\x00\x74\x8c\x1d\x61\x53\xea\x38\x1d\x36\x13\x96\xdf\xca\x95\xdc\x1a\xb2\xa6\xd6\xe3\xf4\x13\x12\xfc\x82\xb3\x16\x53\xe5\x95\x26\xf3\xb9\xc7\x4d\x66\x70\x5e\x9c\x81\xd3\x8c\x56\x6e\x4a\xfb\x3c\x30\x4a\x73\x70\xa9\x87\x45\x83\x23\x18\x14\x54\x83\x9e\xdd\x3c\x54\x4e\xdb\x3b\xaf\x28\xbf\x3b\x8a\x2f\x3b\xf3\xaa\xbf\x52\x3f\x8b\x2f\x54\x10\x7f\x00\x8e\xae\x8f\x20\x2f\xb3\xda\x69\x53\x54\x88\xf0\x9c\xe7\xe6\x97\x03\x8e\x34\xab\x41\x33\x15\x66\xa7\x39\x7f\x3a\x2a\x93\x21\x87\x80\x5c\x70\x95\x44\xc7\x31\x0d\xa5\x8c\xee\x35\x57\xc1\x6e\xd3\xb0\xce\x1b\xd0\xea\x14\xb4\x64\x0e\x65\x86\x75\x13\xf9\x61\x64\x58\x54\x32\xb3\xfe\xfd\x69\x79\x0d\x73\x14\xf2\x79\x1c\xd2\x3c\xca\x27\x36\xbb\x75\xa3\x96\x87\xef\x05\x21\xef\x13\x50\xdf\x54\xd3\x89\x32\x09\x79\xe9\x8c\x32\x77\xab\xdc\x9d\xf3\x4e\x9d\x4d\x78\x9c\xc1\xcb\x8b\xcf\x47\xae\xda\x72\xa9\x2a\xa2\xec\x30\x6c\xba\xe5\xcc\x4b\x36\xfb\x2a\x19\x5e\x5c\x3e\xa0\xd7\x4e\x69\x59\x8c\x7b\x51\xf6\x5e\xdc\x41\x58\x4b\x45\xdd\x76\x74\x74\xb8\x9c\x72\x0a\x33\xbf\x78\x21\xc4\xac\xe3\xfa\x0d\xe6\xcc\x95\x30\x93\x79\xcf\x13\x3b\x06\xca\x56\
xbe\x77\x29\xb3\x89\x31\xba\x5e\x64\xb4\x43\x2f\xc8\x96\x1d\x08\x8f\x33\x5d\xc9\x46\xe9\xbc\x47\x19\xcd\x94\x8a\xc7\xa6\x41\x02\x2f\x9a\x69\x7d\xab\xbf\xf2\x20\xd4\xb5\x79\xfc\x49\xbb\x14\xff\xf0\x74\x71\x65\xe0\x49\x98\xb1\xd4\x38\xb3\x75\xab\xdc\xc0\x87\x9d\x84\xd1\x5c\x74\x8b\x51\xe9\xb8\x6d\x37\xf3\xc1\xf3\x7c\x18\x9c\x6d\x1d\xb3\xd9\x59\x1d\x9d\xcf\x89\xac\x7e\x5f\x95\x20\xbf\xb8\x48\x61\x79\x85\x59\x0f\xd6\x4e\xda\x99\xfc\x4e\x8b\x1f\xb2\xa4\xa0\xce\xeb\x8f\xa8\x40\x76\xc2\xb6\xb8\x31\xe1\x1f\xc5\x4e\x7e\x61\x54\x3c\xc4\x42\x5a\xf2\x47\x31\xb2\x2e\xb1\xe6\xcc\x5c\xea\x8d\x45\x29\x66\x20\xce\x43\xb9\xf9\xad\xda\x53\x29\x19\xe0\x2f\xce\xc3\xb7\x3f\xa3\x4c\x43\xde\xb7\x56\x72\x0d\x02\xef\xd2\x0b\x21\xbc\x12\x1f\x51\xbd\xba\x5a\x67\x30\x12\xbb\x4e\x78\xc5\xbf\x31\x51\xaa\x9a\x31\x85\x54\x87\x4a\x9a\xd2\x38\xd0\xf3\xcb\xd0\x45\x96\x91\x94\xc7\x92\x27\xe6\x73\x8e\x63\x9b\xdb\xbe\xa5\x7d\x3c\x1a\x0a\xc8\x11\xd3\x6c\x3a\xb2\x4c\xb3\x3a\x64\xe1\xbe\xe8\xaf\xaa\x29\x5e\xad\xe4\xf0\x94\x18\x9d\xc2\x94\x99\x5e\x04\x35\xbd\xc2\x54\xe5\xa5\xae\xd5\x16\xe3\x04\xb7\x7f\xfa\xea\x0e\x94\x98\xf2\xbb\x2a\x4a\x34\x5c\xbe\x0d\xde\x9d\x74\xa9\xe4\x15\xfe\x94\xe0\xb9\x6e\x47\x07\xb1\xb4\xad\xf3\xfd\xf2\xb1\x07\xf8\xb6\x47\xdf\xa2\x95\xd3\x0e\xa4\xd7\x91\x60\xbf\x83\xa3\x47\xf1\xbc\x6f\xf4\xcf\xa9\x1d\xfc\x88\xde\xed\x95\x98\x36\x52\xc1\xfe\x2e\xd6\x5c\xde\x40\x04\xf8\x69\xd4\xf2\x99\x28\x48\xbd\x66\x08\xfa\x68\xbe\x6c\x22\xf5\x5b\xf5\xd0\xf2\x92\xe4\xa5\x8a\xe8\x4b\x2a\xf6\x32\x35\x2b\xd4\xe5\xb7\xd5\x73\x0d\x04\x1e\x7b\x77\x12\xe6\xb2\x14\x72\x95\xba\x55\x65\x51\xf9\x9f\x08\x56\x4f\xdf\x7b\x1c\x8c\x90\x9b\xe1\xf4\x23\x46\xa4\x04\x91\x5f\xbf\x78\x8c\x54\xe9\x6c\xe6\x96\xdb\xf6\x6c\xfd\xfe\xb6\xe8\x03\xbf\xf2\xf8\xef\xa7\x3b\xbf\x73\x4c\xf0\x3f\x32\x25\xf8\x4f\x00\x00\x00\xff\xff\xbe\x37\xed\x94\xe5\x22\x00\x00")
func complySoc2PoliciesDevelopmentMdBytes() ([]byte, error) {
return bindataRead(
@@ -697,7 +697,7 @@ func complySoc2PoliciesDevelopmentMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/development.md", size: 174, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/development.md", size: 8933, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -717,12 +717,12 @@ func complySoc2PoliciesDisasterMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/disaster.md", size: 10315, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/disaster.md", size: 10315, mode: os.FileMode(420), modTime: time.Unix(1545087106, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesEncryptionMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x1c\x8b\xc1\xca\xc2\x30\x10\x06\xef\xfb\x14\x1f\xfc\xe7\xfc\x58\x4f\x36\xd7\xd0\x83\x9e\x8a\xf5\x05\x42\x1a\x65\xa5\xbb\x2b\x4d\x2a\xe4\xed\xa5\x3d\x0d\x0c\x33\x1a\x25\x7b\x0c\x9a\xd6\xf6\xa9\x6c\x8a\xd1\x16\x4e\x8d\x62\x5a\x4d\x9b\x78\x0c\x23\x95\x58\xb9\x3c\x39\x17\x4f\xc0\x63\x0a\x3b\x00\x87\x10\xfa\xff\x9e\x24\xbe\x6d\xbd\xe7\x2f\x17\x36\x3d\x12\x87\x39\xd6\xec\x71\xdb\x14\x1d\xce\xa7\xee\x72\x0c\xc9\x44\xb2\x56\x8f\xab\x72\xe5\xb8\x60\xb6\xb4\xed\x86\x9c\x73\x44\x7f\x08\x26\xac\x2f\x4c\x66\xfa\x0b\x00\x00\xff\xff\xd5\x3c\x94\xf4\x97\x00\x00\x00")
var _complySoc2PoliciesEncryptionMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x58\xcd\x8e\x1c\xb7\xf1\xbf\xf7\x53\x14\x60\xe0\x8f\x7f\x56\x33\xa3\x48\x81\x8d\x78\x83\x1c\x94\xcd\x1e\x14\x07\xca\xc2\xbb\x91\x2f\xb9\x54\x93\x35\xdd\xe5\x65\x93\x1d\x92\x3d\xe3\x16\x74\xf0\x6b\xf8\xf5\xfc\x24\x41\x15\xbb\xe7\x7b\x01\x59\xf2\x61\xfb\x32\x33\x35\x64\xb1\x3e\x7e\x55\xac\x5f\x7b\xec\xe8\x1a\x6e\xbd\x89\x63\x9f\x39\x78\xb8\x0b\x8e\xcd\x58\xa1\x89\xc1\x8f\xdd\x35\xdc\xde\x55\x09\x33\xa7\x35\x53\xba\xae\x00\x1e\xee\x6f\xe4\x03\x60\x09\x37\x37\xdf\xae\xbe\xad\x3a\xfc\x31\xc4\xef\x69\xc3\x89\x83\xd7\x25\x4b\xb0\x98\xe9\x1a\xfe\x31\x78\x78\x05\xaf\xff\xf8\xea\xcf\xba\xc1\x84\xae\x23\x9f\xaf\xe1\xad\xe7\xcc\xe8\xc0\x06\x33\x88\xa4\x5a\x2e\x97\xd5\x57\x70\x37\xc4\x3e\x24\x02\xf4\x16\xee\x4d\xe8\xa9\xaa\x70\x05\x0f\x2d\x27\xe8\xd5\x28\xb0\xb4\x66\x4f\x09\x42\x6c\xd0\xf3\x07\x14\x83\xd1\x41\xa4\xff\x0e\x1c\x49\x54\x25\x58\x87\x08\xb9\x25\x18\x12\x41\x58\x83\xfa\x15\x9a\x88\x7d\xcb\x06\x4c\xf0\x39\x06\x97\x16\x80\x09\xb6\xe4\x9c\x7c\xca\xea\x33\x15\xc7\xfb\x1e\x69\x4c\x0b\x60\x0f\x21\x5a\x8a\x90\x03\xf4\x31\x64\x32\x59\x37\x9b\xe0\xd7\x6c\xc9\x8b\x53\x9c\x47\x59\x98\xa9\x89\xfa\x15\x87\xdc\xca\x3f\x86\xf3\xa8\x9e\xf9\xe0\x23\xf5\x83\x65\xb5\x5e\x4c\x64\xbf\x0e\xb1\xd3\x9f\xab\x33\x97\xb1\xef\x1d\x53\x92\x23\xd1\x39\x48\x63\xca\xd4\xa5\x05\x88\xbd\xbd\x98\xbb\x80\x35\x1a\x76\x9c\x65\x95\x1c\x70\xa0\x0e\xb6\x9c\x5b\xf6\x6a\x64\x92\x88\xca\x71\xf2\xe3\x30\x80\xbf\xfe\xfc\x4b\x3a\xda\x94\xc8\x0c\x62\xbc\xf8\xd8\x44\xec\x8a\x51\x6f\x9c\x03\xea\x7a\x17\x46\xa2\xb4\x28\x91\x44\x93\x43\x4c\x0b\xe8\x31\xe6\x65\xe6\xae\xe4\x2e\x53\xd7\x87\x88\x71\x84\x6d\x88\x8f\x24\x0b\x12\xc5\x0d\x1b\x12\x8d\x1b\xb6\x2a\xd2\x95\xad\xe4\x7b\xd2\x6a\xa1\x1e\x21\xe4\x96\xa2\x7a\xdb\x53\x14\x93\x54\x07\x04\x0f\x35\xb5\xe8\xd6\x97\x1c\x80\x16\x37\xec\x1b\xd9\x64\x83\xba\x7c\x92\xbd\x5d\xd0\xd0\x35\x21\x72\x6e\xe5\x7b\x88\x92\x55\xd9\xd7\x61\xa6\x28\x78\xc4\x48\x90\x86\xfa\x47\xcd\x6b\x80\x7c\x98\x06\x
6f\xa1\x1b\x52\x16\x10\xf7\x6e\x2c\xa7\x70\x5e\x55\xd5\x57\xf0\x37\x34\x8f\x4d\x0c\x83\xb7\x4f\x02\x56\x4c\x6e\xb9\x69\xc1\xd1\x86\x1c\x04\x3d\x83\x37\x73\xc6\xba\xde\x29\xf4\x8a\x3b\xec\x53\x8e\x83\x91\xef\x7b\x38\x9f\x65\xec\x22\xbe\xf7\x0e\xaa\x62\x81\xed\x0a\xde\x66\xe0\x04\x1b\xce\xe8\x20\xb7\x98\xcf\x03\x88\x36\xf4\x19\x10\x52\x46\x6f\x31\x5a\x41\x5d\x0c\x68\x5a\x09\xc3\xe5\x0a\x02\x69\x10\x29\x29\x28\x35\x45\x86\x7c\x96\xd4\x1d\x16\x09\xf9\x34\x44\x02\xf2\x76\x99\xc3\x92\xbc\xdd\x61\x6b\x01\xdb\x96\x1d\x01\xba\xa4\xb5\xd4\x85\x2c\xa9\x90\xc2\x89\xa1\xa7\x88\xb5\x40\x7a\x9c\xa2\x39\x37\x8a\xa3\x78\xa6\x9e\x0c\xaf\x4f\x9c\x16\xbb\x37\x64\x35\x6c\x43\xa2\xc5\x79\x61\x3f\xd2\x08\x1d\x7a\x6c\x54\xa6\x61\x9a\x6a\x99\x83\x2f\xb0\x3c\xdb\x33\x24\x31\xee\x20\x10\xa3\xb8\x69\x5c\x18\x2c\x90\xdf\x70\x0c\x5e\x17\x2b\x1c\xa6\x06\x5a\xa0\x70\x12\x68\x85\xd0\xdc\x3a\xd8\x5b\xde\xb0\x1d\x70\x57\xd6\x02\xca\xc3\x4a\xac\x47\xe8\x08\x7d\x7a\xba\x91\x49\xff\x2a\x41\x91\xc2\x87\x07\xac\x1d\xc1\x9f\xae\xa1\xaa\xfe\xd3\x63\x43\x75\x24\x7c\x84\xaa\x7a\xb1\xbc\xf4\x5c\x92\x9e\xc9\x4e\x04\x2f\xaa\x8f\x70\x75\xf5\x0e\x3b\x45\xdf\xbd\xda\xfd\xf2\xea\x0a\x44\x7a\x73\x68\xe2\x24\xdb\xdf\x2c\x93\xe0\x3b\x1a\xe1\x9e\x3f\x90\xfc\x54\x5d\x0f\xa3\xf6\xa5\xab\x2b\x98\x1f\x95\x86\xe0\x0e\x44\x2a\x7b\x33\x27\x5a\xfe\xf8\x08\x47\x4f\xd1\xf5\x76\x1f\xbc\x69\xf3\xc9\xb2\xcb\xb2\x33\x5d\x2f\xfe\x7a\xe9\xb9\x24\x3d\x93\x9d\x08\x24\x5e\x77\x43\xed\xd8\x80\x78\x7e\x78\xe4\xbf\x7a\xf2\xf7\xf7\xff\x3c\x94\xbd\xb9\xbd\x5f\xbe\xfe\xfa\x9b\xbd\xe0\xf5\xd7\xdf\x2c\x6b\xce\x8a\x5a\xf5\xf1\xad\x5f\x47\x2c\xfd\x41\x4a\x4b\xd0\xf9\x99\x3e\x7e\x84\x37\xf3\xdd\x54\xd0\xf6\x25\xf1\xfa\x5d\xf1\x75\xe9\x79\x0e\x76\xfd\x1d\x33\x1e\x8e\x4a\xf0\x05\x79\xfc\x8e\xc6\xf4\x1c\x7d\x7c\xae\xb1\x7f\xcf\x31\x4b\xa7\xbc\x8b\xbc\xc1\x4c\xa7\xb1\x97\xb6\xfd\xc9\xb1\x7f\x47\x59\xaf\xac\xff\x7f\x7f\xf7\xee\x0f\x7b\xf3\x45\xd7\xfb\xbb\x77\xbf\xc5\xc7\x8f\x7a\xc5\x3e\xc7\x78\x5d\x7a\x9e\x83\x5d\x3f\x50\x9d\x38\x13\x1c\x16\xcc\x3e\x8f\x
0b\xb8\xb9\xfd\xfe\x01\x3e\x31\x8f\x37\x14\xb3\x4c\x00\x33\x1e\x3e\xdb\x47\xf8\x5d\x9d\xac\x76\xb7\xf0\xd1\x85\x08\x37\xf3\x9d\x5d\x55\xff\xe7\xeb\xd4\xff\xa5\xaa\xea\x15\xdc\xfe\x64\xa8\xcf\xb0\x6d\x29\x52\x19\x7f\xb7\x9c\x48\x46\xb1\x4c\x76\x51\xf0\xa5\x63\x43\x4d\xd3\xdc\xa2\x83\x72\x6e\x89\x23\x84\xad\xa7\x28\x63\x87\x59\x9d\x1c\x76\xb4\x6f\x9a\x38\xc8\x02\x36\x28\xe3\x25\xb8\x90\x64\x88\x6f\xd1\x37\x32\xa2\x80\xa5\xdd\xcc\x29\xda\x85\x75\xe8\x6c\xac\x03\x55\x1f\x59\x42\x8c\xc6\x50\x4a\xf3\xe8\x01\x1d\xc9\x76\x4e\x5d\x19\xd7\x23\x6d\x64\x9e\x1a\xbc\x30\x9e\x10\xf9\x03\x59\x9d\x51\xa5\x32\x6b\x34\x8f\xa2\x6d\xe8\x8b\x5d\x32\x72\x42\xa4\x66\x70\x18\xa1\xc6\xc4\xe2\x82\x5d\xc1\x0f\x2d\xf9\x79\xfa\xb2\x0b\x30\x43\xca\xa1\x93\xa1\xf2\x29\xda\xa2\x23\xd8\xb2\xc6\x44\x16\x52\x58\xe7\xad\x8c\xf0\x21\x42\xef\x30\x2b\x7b\x08\xeb\x35\x45\x1d\xf2\xa7\x50\x68\x6a\x72\x80\x50\x67\x64\x7f\x34\x6d\x45\x6a\x30\x5a\xf6\xcd\x75\xa5\x7c\x95\xcb\x00\x77\x3c\x78\xe5\x20\x19\x1c\xe4\xc0\x63\x16\xc8\xf1\x84\xc8\x4d\x2a\xde\xf8\x11\x0c\xf6\x65\xa0\x55\x26\x27\x23\xb8\xd8\x89\x1b\x64\x37\xdb\x83\xce\x85\xed\x34\x52\xce\x6c\x69\xef\xbf\x2c\x90\x94\xec\xd3\x7e\x4a\x71\x82\x1b\x94\x30\xac\x8e\x6c\x2f\xac\x34\x8f\x73\x00\x4d\x18\x7c\x8e\x62\x45\xc1\x5b\x7e\xc2\x3f\x31\x6f\xf6\x31\xe5\x50\x82\x9a\x23\xfa\xb4\xa6\xf8\x94\x95\xbf\xfe\xfc\x8b\x10\x7f\x5c\xed\x66\xdf\x89\xa3\x1c\xd3\x75\x37\x2e\x77\x63\x3a\xed\x2f\xd3\x39\x41\x4d\xd8\x50\x9c\xc6\x59\x34\x26\x44\x8b\xde\x50\x21\x5c\x62\xae\xc3\x6d\x3a\x76\x67\x94\x29\xbf\xd1\xe9\x3d\xc4\x52\x42\x13\xb8\x94\x53\x4c\x11\x60\x2f\x94\xc3\xb4\x62\x54\x4c\x3b\x7a\x59\x02\x2a\xd7\xc1\xea\xf0\x6a\x57\x6b\x7c\x50\x8b\xe6\x40\x6c\x38\x38\xad\x03\x3f\x4e\x56\xec\xcf\x11\xaa\xc6\xde\xb8\xc1\x2a\x8d\x11\x02\x9c\x5f\xd2\x4f\xf2\x01\x51\x6a\x8b\x0b\x9f\x2b\x71\x39\x70\x5b\x95\x97\x7a\x86\x9b\xd0\xf5\xa2\xdc\x04\x05\x92\xa6\x5d\x19\x91\x9f\xdf\x74\xcc\x04\xad\xd0\xbb\x7f\xaf\xee\x57\x67\x67\xed\xa9\xcb\xcc\xb1\x87\x04\x06\xfd\xce\x93\x89\xb8\x1d\x2b\xae\x85\xbe\x
a2\x22\x43\x46\xca\x5a\x18\x8f\x14\x7b\x5f\xde\xc9\xa4\xfd\x3b\x80\x13\xfe\xb4\x6b\x31\x25\x9e\x5a\xf4\x12\x82\x5d\x35\x16\xb8\x0f\x39\x48\x61\x18\x49\xff\xb4\x3d\x9d\x34\x94\x45\x21\x87\xa4\x80\xc3\x86\x16\xda\x36\x86\xbe\x50\xb2\x90\x77\xef\x4b\x0a\xab\xbd\x9f\xe8\x9f\x68\xd4\x82\x9d\x20\x7f\x62\xdf\x8c\xd2\x99\x79\x6d\xd8\x96\x35\xd3\xe1\x02\xf0\x03\x4d\x4b\x4b\x89\x1b\x2f\xbd\xb7\x00\x65\xb1\x47\xde\xc4\x49\x0b\xb1\xd5\x04\xbe\xb4\xa4\x9f\x47\x6d\x44\xcc\x6d\xc8\x53\x14\xac\x48\x3d\x1c\xa6\x5b\x6d\xff\x1c\x6b\x6d\xc7\x9e\x53\x8e\xd3\x19\x5f\x62\xbc\x89\x84\x99\x16\x90\x4c\x4b\x76\x70\x04\x96\x1c\x89\x80\xbc\x74\xa3\x97\x96\x93\x76\xa5\x39\xe6\x05\x47\x89\x32\x0c\x09\x1b\x2a\xef\x36\x66\xa4\xfc\x16\x87\x4a\x27\x99\x2f\x84\xf9\x32\x98\xdf\x70\x48\x9d\x46\xda\xf5\x2a\x69\x73\x3d\xc5\x19\xa0\x8e\xd7\x94\xb9\xa3\x4f\x3d\x4b\x8d\xa7\x72\x02\x66\x70\x84\x29\x43\x90\x4e\x42\x1b\x8a\x23\xbc\x7a\x0d\x5d\xf0\xb9\x15\xe3\xab\xff\x05\x00\x00\xff\xff\x69\xcf\x17\x8d\x05\x15\x00\x00")
func complySoc2PoliciesEncryptionMdBytes() ([]byte, error) {
return bindataRead(
@@ -737,7 +737,7 @@ func complySoc2PoliciesEncryptionMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/encryption.md", size: 151, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/encryption.md", size: 5381, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -757,7 +757,7 @@ func complySoc2PoliciesIncidentMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/incident.md", size: 8552, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/incident.md", size: 8552, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -777,7 +777,7 @@ func complySoc2PoliciesInformationMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/information.md", size: 5359, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/information.md", size: 5359, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -797,12 +797,12 @@ func complySoc2PoliciesLogMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/log.md", size: 4307, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/log.md", size: 4307, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesMediaMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x1c\x8b\xc1\x4a\xc6\x30\x10\x06\xef\x79\x8a\x0f\x3c\x47\xac\x07\x95\x5c\x73\x52\x28\x94\xd6\x17\x58\x93\xb5\xec\x4f\xb2\x0b\x4d\x5a\xe8\xdb\xff\xb4\xa7\x81\x61\x46\xa9\x72\xc0\xcc\xd5\x0e\xfa\x2b\x8c\x91\xb3\x10\x48\x33\x62\xb1\x3d\x63\xe9\xb6\xd1\xca\x98\xac\x48\x3a\x1d\xa5\xcd\xf4\xac\x01\x63\x9c\x5c\xa3\x2e\xed\x5f\xb8\x05\x07\xfc\x2e\xf1\x02\xe0\x11\xe3\xc7\xeb\xa7\xab\xf4\xb0\x6d\xe6\x43\x9a\x98\xde\x89\x47\xa6\xce\x01\x3f\xbb\x62\xc0\xfb\xdb\xf0\x75\x0f\xc9\x6a\x65\xed\x01\xdf\x2a\x5d\xa8\x20\x5b\xda\x2f\xe3\xbc\xf7\xce\xbd\x20\x5a\x15\x5d\xb1\x98\xe9\x33\x00\x00\xff\xff\xac\xf1\x27\xb5\xaf\x00\x00\x00")
var _complySoc2PoliciesMediaMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\xcd\x6e\x1c\x47\x92\xbe\xf3\x29\x02\xf0\x85\x32\x9a\xdc\xf5\x1e\x76\x17\xba\x59\xa4\x06\xe8\x81\x69\x09\x24\x01\xc3\xc7\xe8\xac\xa8\xae\x30\xb3\x32\xcb\x99\x59\xdd\x6a\x9f\xfc\x1a\x03\xcc\xbc\x9c\x9f\x64\x10\x91\x59\x7f\xfd\x43\x49\x83\x19\xc0\x3a\x48\x54\x77\x65\xc6\xff\x17\x5f\x44\xd1\x61\x4b\x6f\xe1\x91\x5a\xbf\xc3\x8d\x25\x78\xa0\x8a\x11\xd0\x55\x70\x67\x7d\x5f\xc1\x53\xf2\x01\xb7\x04\x1f\xbd\x65\x73\xb8\x42\x13\xbc\x3b\xb4\x6f\xe1\xe1\xee\xe3\x55\xc4\xc4\xb1\x66\x8a\x6f\xaf\x00\x9e\x9f\xee\xe4\x1f\x80\x1b\xb8\xbb\xfb\xdf\xdb\xff\xbb\x6a\xf1\x17\x1f\x1e\x69\xc7\x91\xbd\xd3\x47\x6e\xa0\xc2\x44\x6f\xe1\xaf\xbd\x83\xef\xe0\x7f\xfe\xfb\xbb\xff\xd7\x03\xc6\xb7\x2d\xb9\xf4\x16\xd6\x8e\x13\xa3\x85\xca\x9b\x5e\x3e\xb9\xba\xb9\xb9\xb9\xba\xfa\x06\x3e\xf6\xa1\xf3\x91\x54\xab\x27\xe3\x3b\xba\xba\xc2\x5b\x78\x6e\x38\x42\x18\x15\x6f\x45\xf1\x15\x18\xd5\x3a\x16\xad\xe5\xc4\xbb\xc0\x6e\x0b\x3f\xfb\x3e\xc0\x87\xbd\x83\x7b\xda\xb1\x21\xb8\x7e\xf7\xf3\x87\xfb\x37\xd0\xa9\x59\x50\x51\xcd\x8e\x22\xa4\x86\xc0\x6f\x7e\x21\x93\x78\x47\x71\x05\x81\x7e\xed\x39\x90\x28\x13\xf5\x32\x6e\x3b\xab\xff\x95\x3b\xd9\xc5\x14\x7a\x93\xc4\x3e\xa8\x7d\x50\xb1\xf2\x45\x85\x09\xc1\xbb\x53\xed\xd8\x15\x05\xc9\xed\x38\x78\xa7\x17\xaf\xf4\x66\xef\xa0\xa3\x10\xbd\x43\x6b\x0f\x37\x7e\xef\xa8\x82\x4a\x75\x55\x3d\xb6\x18\x2a\x4b\x31\x82\xaf\xf3\xf5\xc6\x62\x8c\x5c\xb3\x41\x91\x0f\x96\x76\x64\x6f\x27\xc7\x14\xc3\xb0\xeb\x2c\x8b\x61\x1e\xd0\x5a\x60\x57\xfb\xd0\xe6\x13\x22\x54\x6f\xda\x73\x6a\xd8\x65\xdb\xc3\x16\x1d\xff\xa6\x0f\xfc\xf1\xfb\xdf\xe2\xe2\x40\x24\xd3\x07\x4e\x07\xe8\x82\xdf\x06\x6c\x57\x80\x11\xf6\x64\xad\xfc\x2b\xb7\x5f\x0a\xc6\x21\x26\x6a\xb3\xff\x2e\x99\x08\xc4\xa9\xa1\x00\xf9\x43\x1f\xc0\x78\x97\x82\xb7\x96\x2a\xd8\x1c\x4e\x74\xfb\xac\xa1\x7d\xa4\xa0\xbe\x5a\x18\x50\x14\xb9\x60\x70\xb9\x30\x1d\x3a\x36\xa2\x21\xb0\x33\xb6\xaf\x44\xb9\xb6\xb3\xfe\x40\x94\x6d\x50\xd5\xd0\x24\x1f\xe2\xd2\x05\xee\x00\xf4\x29\x5
1\x70\x68\xa1\xc3\x90\x54\x9f\x06\x93\x64\x38\x01\xbb\xe4\xf5\x2c\x9a\xa4\x2a\x2c\x1c\x33\xd7\xf3\x75\xdb\xe1\xba\xa1\x40\xec\xb0\x4e\x14\x20\x50\x4d\x21\x50\xa5\x86\x47\xf8\xe3\xf7\xbf\xab\xe9\x7f\xfc\xfe\x8f\x37\x4b\x07\xb5\x7d\x4c\xb0\x21\x68\xb1\x22\x08\x84\x15\xdb\x03\xe0\x0e\xd9\x6a\xc4\xe6\x7e\xbb\x95\xa2\x7b\x87\xe6\x65\x1b\x7c\xef\xaa\x13\x5f\xcf\xab\xa5\x0b\xde\x50\xd5\x07\x2a\x15\x80\x35\xd9\x03\xf4\x51\xca\xe0\x4b\xaa\xf3\x62\x42\x24\x0f\x96\x5b\x4e\x39\x47\xad\x97\xd4\x0f\x40\x9f\x3a\x1f\xfb\x40\xb7\x00\x4f\xbd\x69\x44\x66\xab\x71\x1e\xee\x1c\xcc\x8c\x29\xb0\x49\xf6\xb0\x70\x26\x19\xec\x23\xc9\xe3\xa2\x7a\x24\x17\x59\x0a\x3d\x8b\xc8\x91\x42\x97\x4f\x7b\xf1\xa9\xd7\x24\x69\x6f\xe1\x5d\x39\x49\x68\x9a\x72\x3c\xd2\x28\x33\x1d\x3a\x49\x8d\x20\x31\x96\xd8\x38\x91\x4b\x5d\x43\x2d\x05\xb4\xa2\x76\xe7\x43\x52\x3f\xb0\x03\x87\xa9\x0f\xb4\x02\x4e\xa0\x2e\x8d\x91\xe5\x1b\xf1\xde\x49\xa8\xc5\x09\x82\x7c\xf2\x05\x6e\xd8\x4a\xf5\x25\x0f\x7e\x47\x21\x12\xcd\xea\x44\x9f\x58\xe4\xfa\xc2\x02\xe0\xba\x78\x64\xaa\xe2\x98\xd0\x55\x18\xaa\xac\xb9\xf3\x09\x6a\x6f\xad\xdf\x53\x35\xab\xae\x01\x8a\x45\x4e\xe4\x98\x62\xb6\x3e\x90\xb8\xaf\x20\x5f\x47\x21\x21\x3b\x89\x78\xf2\x9f\x89\xf9\xea\xd5\xa0\xdf\xc2\x7b\x71\x70\xb9\x39\x17\x0b\xbb\xf8\xb5\x30\x2c\xbe\x08\xb4\xe5\x98\x82\x3a\x63\x05\x2d\x3a\xdc\xea\x01\xf9\x99\x5d\x22\x87\xce\x14\x6d\x2a\x8e\x9d\x97\x4c\xf0\x4e\xac\xd3\x18\x4b\x48\x67\x69\x95\x3d\xf2\xd3\x80\x1b\x63\x29\xac\x54\x56\xa2\xd0\xce\xb2\x69\x1e\x06\x2d\x4f\x4d\xe6\xf9\xa7\x9a\x6a\x1c\x47\x0c\xa7\x4a\x6a\xf7\xf1\xfd\xd3\xf3\xe3\xfa\xee\xf9\xfd\xbd\x04\xf6\xee\xc3\x8f\x7f\x59\xdf\xbf\xff\xf1\x79\xfd\xfd\x0f\x92\x36\x68\x8c\x0f\x95\x68\x9d\xc1\x43\x04\xdf\x4b\xde\xde\x2d\x1b\x41\x6e\xd4\x70\xad\x92\x49\x1e\xbf\xc6\x37\x6f\xb4\xa4\x1f\x87\x8f\xa2\x9a\x73\xf9\xb4\x7e\xfd\x7d\x8c\x94\x60\xed\x76\xe4\x92\x0f\xf9\xb3\xa7\x21\x79\xd6\xce\x70\x25\x79\xf1\x48\xb1\xf3\x2e\xd2\xfc\xe4\x7b\x67\xc2\xa1\x5b\xdc\xf7\xcd\xfc\xfb\x6f\x8f\xe8\xc6\xb7\x57\xca\x03\xf8\x16\xbe\x3f\xed\x2
2\xd9\x76\x75\x6c\x5f\x38\x40\xc9\x8b\xb1\xd3\x4a\x02\xb2\x13\x6d\x92\x3f\xad\xa1\x01\x11\x72\x46\x90\x54\xc5\xa5\x6e\x77\x64\xf2\xc2\x89\x1b\x75\xe2\x2b\x7a\x5a\xb9\x5d\xef\x5e\x9c\x9a\xc9\xbf\xe1\x72\x33\xe7\xca\x44\xf8\xb5\xc7\x90\x28\xd8\x03\x6c\x30\xb2\xe6\x09\x39\xc1\xb7\x92\x22\x9a\x25\x31\xb1\xb5\xf3\xa6\x35\xd4\x7c\xc1\xb1\xa3\xc6\x08\xe5\xcf\x77\xb7\xf0\xec\xe7\x52\x0f\x80\x0e\x38\x51\x9b\xb3\x56\x2a\x2f\x0c\x77\x1c\x1b\xa3\x4a\x9b\x86\xcc\xcb\xe0\x2c\x39\x38\x65\xde\x89\xeb\xd6\xb3\xf4\x1e\xb3\xe4\x41\xcb\x2e\xc0\xf5\xfa\xe9\xe1\xcd\x91\x6a\x0d\xc1\xfa\xe9\x21\x0b\x4a\x81\x30\x69\xff\x3c\xd6\x63\x00\x65\x81\xa7\x0d\x81\xf5\x06\x53\xae\x16\xeb\x63\xca\xf5\x8b\x13\xa6\xf1\x90\x96\x81\x04\x72\x47\xdf\x5b\xbf\xdd\xe6\xd8\x9c\x2b\xa3\xcf\x66\xf5\x22\x11\xcc\x3c\x11\x9e\xbf\xcc\x91\xde\x55\xbd\x49\xda\x61\xb1\xeb\x82\xef\x02\x63\xa2\x39\x14\x15\xa4\xce\x6e\x96\xd0\xbb\x44\x61\x87\x36\x2e\x0e\x94\xfc\x1e\xd0\xa9\x20\x6c\x94\x4e\x88\x02\x27\x84\x52\x15\x2b\xc8\xb1\x48\xfa\xb3\xc5\x0d\x59\xfd\x89\x92\xf9\x72\xcd\x57\xb0\x97\x5e\x06\x9d\x50\x1d\x21\x44\xab\x6c\xcb\x3c\x3f\xd1\x01\x5a\x25\x3c\x49\x9b\xd1\x06\xcd\x4b\xdf\x81\xf1\xdd\x61\xb8\x75\x0e\x7b\x43\xf4\x8a\xa9\x19\xf4\x81\x3e\x49\x5f\x99\x14\x7b\x3c\xe7\xc1\xa9\x27\x4b\x0c\x95\x66\x40\x67\xd1\x14\x45\x1a\xa1\x5f\x10\xa8\xea\x0d\x55\x10\x38\xbe\x88\xfc\x9a\x83\xaa\x55\x5b\xef\xab\x8c\x16\xad\x80\x39\x8c\xb2\xd6\x85\x09\x94\x46\xae\xce\x1f\xbb\xce\x59\x44\x5f\x9d\x8d\xf0\xdb\x45\x6e\xbf\x5b\x2a\x6b\xbd\x79\xa1\x0a\x0c\x6e\xd8\x51\x12\x85\xaa\x80\x7b\x0a\xcb\x82\x90\x41\x4b\xb2\x40\xa8\x43\xc6\x4f\xaa\x66\xc4\x44\x70\x40\xd2\x94\xf2\xf7\xdc\x35\x03\x92\x9d\xcb\xe8\x13\x04\x5e\x64\x70\x75\x0c\x65\x17\xe6\x95\x91\x8b\x0d\x21\xa0\x80\x91\xaa\x15\x94\x1e\x5b\x02\x38\x7c\x5b\x51\x4c\xc1\x1f\xe4\x81\x0d\xd5\x62\x4d\xc6\xaf\x40\x7d\xcc\xa4\x3e\x77\x5a\xf9\xb9\x9e\x14\xf8\xa9\xa1\xcb\x92\x85\x9a\x0c\xa7\x56\x73\xa1\x39\x75\x55\x74\x8e\x8e\x7e\x29\x88\x12\xfd\x08\x9e\x85\xcd\xe9\xe5\x54\x41\x1d\x7c\xbb\x04\xe7\xdc\xd7\xb
f\x5d\x8c\xba\xcb\x6e\xb4\x64\xac\x03\x63\xff\xcf\xf4\xa4\xa5\x6a\xf0\x38\xa3\x30\xd0\xe2\x41\x0e\x48\xb0\xdb\xce\x72\x6c\xf2\x70\xd0\xa2\xeb\x33\xc5\xc4\x3e\xf9\x56\xcb\xab\x25\x74\x52\x4f\x9f\x37\xe3\xcf\xd1\xb2\xfe\x64\x6d\x0a\x1e\xe7\xba\xfc\x0b\x7e\xbf\x04\xb0\xe7\x62\xf0\xd5\xfd\xa1\x1c\x43\x69\x71\xdb\xde\x62\x98\x75\x0a\x65\x99\x3a\xb2\x0e\x8f\x19\xef\x6a\xde\xf6\x03\x0d\x46\x63\x28\xc6\x21\x18\x2b\x49\x52\x75\x87\xdc\xdf\x7a\xc7\x79\x7d\x71\xd4\x28\xee\x0b\x38\x9c\x4f\xa1\x29\x55\x3a\xcb\x19\xde\x65\x94\x4c\x60\x09\x63\x02\xef\x08\xbc\x4e\xf6\x5d\x73\x88\xd2\x46\x72\x17\xd0\xb8\xdf\x53\x47\x4e\x81\xb9\x18\xb7\x14\xd1\x05\xbf\xe3\x8a\xc2\x2a\x33\xee\x41\xc2\xbc\x1a\xb2\xef\xcb\xb8\x3e\x18\xab\x03\xcc\x3c\x08\x27\x95\xa7\x28\x2b\x95\x7b\x5e\x60\x9c\x66\xc1\x8a\x5a\xef\xb4\x08\x69\x35\xec\x28\x52\x13\x7c\xbf\x6d\x26\xd6\x81\xc6\x04\xaa\x38\x15\x2f\x4f\x67\xf4\xbf\xc9\xf7\x41\x21\x33\xfb\x41\xb3\x24\x0b\x48\x0d\x71\x80\x1a\x8d\x0c\x76\x5c\xd0\x2e\xa3\xbc\x60\xa8\x4f\xcd\xe8\x35\x99\xd5\x5d\x05\x64\xc9\xa4\xe0\x5d\xfe\x68\x55\xe6\xec\x0d\xc5\x34\xf4\x69\x9a\x65\xe0\xfa\x95\xbc\xfb\x5c\x97\xcb\xc5\x3c\xab\x96\xb1\x07\x8c\xcd\xe9\x4c\xef\x59\x34\x99\xa3\x14\x5a\x36\x91\x0c\xc6\xfa\xd7\xf9\xc4\xca\xa6\x21\x24\x32\x8d\xf3\xd6\x6f\xb3\x07\x74\xdf\x10\xe3\xd8\x14\xb5\x5c\x76\xd3\xa6\x64\xfd\xf4\x30\xeb\xef\xda\x5b\xca\xc8\x8f\xe7\x1d\x21\x43\x2e\x47\x71\x07\xbb\x7e\x68\x32\xe5\xbb\x7f\x6b\x93\xf9\x78\x3c\xf4\xe6\x4d\x64\x9c\xfa\xcd\x87\x59\xa2\xa2\x3d\x6a\xfe\x4a\x28\x56\x90\x02\xba\x58\x56\x3e\x3e\x0c\xee\xa0\x57\xf7\x87\xa2\x9b\xc6\xba\x77\x15\x9d\x6e\x19\x14\x18\xd5\xd4\xd8\x70\x97\x39\xf5\x49\xc7\x0a\x94\xd3\x45\x51\x98\xb7\x8d\xf6\xb5\x01\xd5\x95\x80\xaa\xba\xb4\x23\x29\x65\xad\x8f\xdc\x0a\x84\xb9\x9f\xe0\x78\x56\x6c\x59\xa6\xe2\x56\x2d\x73\xa5\xde\xe3\x4e\xa4\x60\xa3\xa2\xc4\x8e\x82\x40\xe4\x34\xcd\xcb\x7d\x97\xb7\xa6\x13\x1e\x2e\xf9\xd9\xda\xc5\x84\xd6\x4e\x6b\xd0\x05\xce\xd6\xf0\xe0\x37\x6c\x69\xd8\x14\x3f\x4c\xd2\xae\x1f\xee\x1f\xde\x40\xf4\x75\xda\xe3\xd0\xf
0\x24\xfc\x54\xd7\x79\x65\x6c\x0f\x45\xb9\xd5\xe8\x1b\xb9\x7f\xcf\x5d\x59\x32\xbd\x12\x81\xe1\x80\x26\xd0\xc5\x0d\xc9\xc2\x90\x07\xd1\x1b\x33\xc9\xe4\xa8\xde\xf8\xc5\x6f\x20\x71\xb2\x79\x57\xf9\x5f\x92\x22\x7a\x55\x14\xb0\x6c\x7c\xe0\xdf\x32\x42\x4b\x51\x5c\xcc\x97\x73\xbb\x28\xd5\x71\x23\x55\x49\x71\xb9\xf7\xd4\xe5\x54\x37\x82\x73\x1c\x37\xcb\x1b\x8c\xc3\x06\x74\x6a\xa3\x39\x5b\xd5\xc8\x9c\x36\x5f\x68\xd8\x42\x42\x17\x7c\xc3\x1b\x4e\xc3\x4d\x5a\xe2\xaf\x2d\xd0\x15\x3c\x85\xa6\xe8\x96\x4a\x34\x2a\x3c\x32\x2f\x8a\x25\x9a\xa8\x31\x1d\x97\x7b\x81\xf2\xc2\x4c\xe4\x4d\x89\x3a\x2b\x62\x9d\x14\xb9\xe6\xb2\xd7\x9b\x90\xf2\xfa\xe3\x7a\xfd\x46\x2d\x2e\x53\xeb\x50\xbb\xb3\x62\x0d\x93\x2b\xca\xe4\x9b\xb8\xa5\xcc\xb0\x2e\x58\xb1\xac\x96\xbc\xab\x53\x88\x34\x86\xba\xbc\x5d\xec\x55\xef\xd9\xae\x6c\xc0\x5c\xbf\x89\x14\x0a\x4a\x8e\xab\xf1\x4b\xde\x5a\x16\x8b\xb0\x47\x7f\x06\x98\x86\xab\x65\xe6\xa3\x0a\xfa\xee\x2c\x27\x39\x0a\xeb\xfd\x43\x89\x44\xd5\x79\x76\xd2\xb4\x7c\x2a\xcb\xbe\xb1\xaa\x86\x8b\x39\x57\xe9\xc9\xbc\x88\x99\x27\x89\xbf\x8e\xae\x7f\x3a\xbb\x82\x5b\x6c\x42\x8f\x67\x96\xaf\xee\x66\xb3\xf5\xc5\xd1\x55\xa5\x71\x8f\x8d\xab\xc3\x18\xf7\x3e\x48\xdc\xd9\xad\xa0\x77\x32\x07\x42\x87\x49\x06\xe6\x15\xd4\xec\xb6\x14\xba\x20\x6e\xf0\x01\x24\x6a\x3b\xb4\x82\x32\x23\xaf\x68\xc9\x34\xe8\x38\xb6\xaf\xcb\x56\x36\x63\xbc\x73\x64\x14\x95\xb3\x22\xc5\xd1\x83\x79\x7b\x0e\xa4\xef\x8f\x1c\xa5\xbd\x0f\x2f\x47\xae\x2b\x8d\x32\x57\xc7\x34\x64\xf5\x29\x72\x45\xe7\x58\xbb\xc2\x41\x17\xa8\xe5\x28\xf5\xc5\x65\xd7\x32\xec\x69\xa8\x4e\xd0\x3b\xb1\xd5\x55\x92\xfa\xba\xb4\xad\xc7\x6d\xf7\x6a\x4e\x6c\x8a\xe3\xbe\x44\x21\x76\xd0\xf5\x1b\xcb\x46\x2a\x16\xe3\x7c\x42\xc8\x8b\x24\x7c\x91\xd1\x00\xa3\xbe\x90\x38\x1a\x4d\xf4\x1e\x7d\x75\x36\xee\x93\x02\xe1\xb2\x18\xa5\x3e\xdc\x0c\x29\x0b\x78\x2e\x75\xfb\x88\xc9\x34\xe5\x65\x50\xdf\x55\x98\x66\x23\xf2\x94\xb5\xa5\x16\xec\x61\x79\xf8\x6e\xda\xf9\x9e\xa3\x57\xa5\x22\x3e\x9b\x90\x78\x39\x21\x17\xac\x45\xb5\x19\x69\x4b\x99\xcb\xe7\x1e\x8d\xf3\xa9\x7c\x05\xd1\xdb\x3
c\xe0\x17\x32\x9c\xe9\xbc\x30\xf0\x50\xe9\x4b\xad\x43\x7e\xcf\x43\x61\xc7\x86\xdd\x76\xa9\xc5\x3a\xbf\xd3\x98\xd0\x39\xf9\x19\x9a\xc8\x1f\x54\x44\xf1\xfb\xb1\x9a\xf3\x48\x22\x77\xa2\x3b\xc8\xb8\x40\x9f\x04\xcf\x80\x53\x21\x25\xb7\xe3\xf9\xe3\x8b\xf2\xca\x84\xad\xa5\x2d\x0a\x2b\x48\x14\x58\xe6\xa0\x45\xa9\xdf\x9e\xc8\x2f\xed\x5f\x2a\x92\x0d\x39\xb1\x7c\x00\x9f\xd3\x87\x7f\xf0\x9a\xa4\x37\x8a\x22\x63\x4d\xc7\xd3\x07\x9f\x0b\x31\x3b\x0b\x96\xc9\x17\xf6\x3f\x74\xd8\x7d\xc3\xa6\x81\x06\x77\x54\x4a\x86\xdc\x09\x95\x3d\xb3\xd7\xbd\x38\xd1\x04\x52\x80\x5f\xd2\xb3\x1d\xd3\x7e\x05\x32\x99\xac\x06\x32\x50\x91\xa5\x44\xda\x70\x8e\xf4\x3c\xf7\x7e\xe2\x6c\xe3\x5a\x30\xd0\x33\x9e\xfe\x3a\x1d\xcb\x08\x0a\x75\x6f\x6d\xd6\xae\xbc\x86\x91\x08\xf9\x5a\xd3\x60\x58\x4f\xcd\xd3\xb6\x56\xea\x2d\x44\x71\x1a\xd8\x1c\x89\x96\x18\x0e\x23\x81\x99\xf5\x17\x5f\x83\x8c\xf0\xe8\x0e\x37\x81\x2c\x0e\xdb\xb5\x95\x56\x96\xef\xd3\xb0\xa6\x88\x85\x56\x1e\xef\x9a\x5e\xb1\x6e\xcf\xd6\x6a\x18\x3b\xcc\x91\x9b\xde\x17\x5f\x8f\x00\xa5\x2d\x37\xff\xd2\x81\x78\xbf\xa6\x4c\x6f\x4f\x20\x4e\x3e\x14\x80\x86\x2e\xff\xd6\xc3\xf1\x46\xe1\x44\xb0\x08\x2d\xe5\x03\x8e\xf6\x47\xf4\xd4\x11\x55\x8a\x84\x0b\x6c\xca\xc4\x5a\xbd\x21\x7c\x60\xb9\x0d\x1c\xfb\xcf\x26\x10\x2a\xd0\x0d\x0e\xcb\xf1\x3a\xcf\x19\x67\x8b\x00\x1f\x14\xbf\x5a\xdd\xd1\x24\x21\xc5\x65\x07\x96\x87\xb3\x7f\x06\x00\x00\xff\xff\xae\x5e\x22\x54\x73\x22\x00\x00")
func complySoc2PoliciesMediaMdBytes() ([]byte, error) {
return bindataRead(
@@ -817,7 +817,7 @@ func complySoc2PoliciesMediaMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/media.md", size: 175, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/media.md", size: 8819, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -837,7 +837,7 @@ func complySoc2PoliciesOfficeMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/office.md", size: 3927, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/office.md", size: 3927, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -857,7 +857,7 @@ func complySoc2PoliciesPasswordMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/password.md", size: 1796, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/password.md", size: 1796, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -877,7 +877,7 @@ func complySoc2PoliciesPolicyMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/policy.md", size: 892, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/policy.md", size: 892, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -897,7 +897,7 @@ func complySoc2PoliciesPrivacyMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/privacy.md", size: 346, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/privacy.md", size: 346, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -917,12 +917,12 @@ func complySoc2PoliciesProcessingMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/processing.md", size: 210, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/processing.md", size: 210, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesRemoteMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4c\xcb\x31\x8a\xc3\x30\x10\x85\xe1\x5e\xa7\x78\xb0\xb5\x96\xb5\x8b\xdd\x45\x9d\x11\x29\x92\xca\xd8\xb9\x80\x90\x27\x61\x82\x67\x06\x2c\x39\xe0\xdb\x07\xbb\x72\xf5\x7e\x1e\x7c\x9a\x84\x02\x06\x12\xab\x84\x2e\x67\x2a\x05\xbd\xcd\x9c\x37\x97\xf2\x62\xba\x49\xc0\x70\xe9\x7a\x57\x52\xe5\xf2\x60\x2a\xc1\x01\xf7\x31\xee\x03\x78\xc4\xf8\xfb\xdd\x9c\xba\x3d\xf5\x9f\x93\xf4\xb2\x65\xa0\x37\x17\x36\x3d\xa8\xc7\x94\x2a\x05\xdc\x56\x45\x83\xf6\xa7\xf9\x3f\x40\x36\x11\xd2\x1a\x70\x55\xae\x9c\x66\x4c\x96\xd7\xfd\x71\xde\x7b\xe7\xbe\x10\x4d\x58\x9f\x18\xcd\xf4\x13\x00\x00\xff\xff\x4e\x47\x83\x97\xb4\x00\x00\x00")
var _complySoc2PoliciesRemoteMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x57\xcd\x72\xdc\xc8\x0d\xbe\xcf\x53\xa0\x2a\x17\x5b\x35\x9a\x8d\xf7\x90\xa4\xb4\x27\x47\x71\x55\x9c\xaa\x64\x55\xb2\x37\x7b\xee\x69\x82\x43\x44\xcd\x06\x83\x06\x35\xa6\x4f\x7e\x8d\x54\x25\x2f\xe7\x27\x49\x01\x4d\x72\x46\x96\x64\x25\xa7\xf9\x61\xff\x7c\xc0\xf7\xe1\x03\x98\x43\x8f\x57\x70\x8b\x3d\x2b\xc2\xdb\x18\xb1\x14\xb8\xe1\x44\x71\xda\x84\x28\x9c\xa7\xfe\x0a\x6e\xdf\xbd\xbd\xd9\x94\xa0\x54\x5a\xc2\x72\xb5\x01\xf8\xf8\xe1\xda\x3e\x00\x2e\xe1\xfa\xfa\x77\xbb\x37\x67\xdf\x7f\x3c\xfb\xfe\xfb\x4d\x1f\xfe\xc1\x72\x8b\xf7\x54\x88\xb3\x6f\xbd\x84\x26\x28\x5e\xc1\x5f\xc6\x0c\x6f\xe0\xc7\xdf\xbe\xf9\x83\x6f\x88\xdc\xf7\x98\xf5\x0a\xde\x67\x52\x0a\x09\x1a\x8e\xa3\xfd\xb3\xb9\xbc\xbc\xdc\x6c\x7e\x03\x37\xa3\x0c\x5c\x10\x42\x6e\xe0\x43\xe4\x01\x37\x9b\xb0\x83\x8f\x1d\xc2\x30\x3f\xe1\x16\xb4\xa3\x02\x83\xc3\x07\x2a\xa0\x0c\x0d\xb6\x94\x11\x04\xff\x39\x92\xa0\x1d\x58\xa0\x65\x81\xc8\x39\x63\x54\xca\x07\x5b\xa5\x1d\x02\xcb\x21\x64\xfa\x1c\x94\x38\x7f\xfd\xf2\xaf\x02\x65\x2a\x8a\x7d\xf1\x1b\x33\xea\x91\xe5\xae\x40\x2b\xdc\x83\xd4\x74\x75\x5c\xb4\x6c\x81\x72\x4c\x63\x63\x27\x0d\x28\x85\x73\x48\x69\xba\xe4\x63\xc6\x06\x1a\xbc\xa7\x88\xbe\x06\x58\x1a\x14\xbb\xac\xa7\x4c\x3d\x7d\x46\x4b\x44\x80\xc4\xa5\xfc\x80\x9f\x06\x2e\xa3\xe0\x6e\x0e\xea\x14\x45\x18\x86\x44\xe8\xa1\x84\x94\x60\x2c\x28\xc5\x22\xa5\xdc\xb2\xf4\x0e\x76\x05\x7a\x24\xed\x28\x3f\x0a\x66\x3e\x50\xa7\x81\xa2\x61\x9b\x01\x63\x01\xec\x87\xc4\x13\x62\x8d\x31\x72\x56\x09\x51\x59\xca\x16\x42\x81\x23\xa6\x64\x9f\x21\x4f\x80\x9f\x14\x25\x87\x04\x43\x10\x75\x3c\x5d\x50\x23\x0d\x81\xb2\xb2\xef\x0d\x51\x1d\xc2\x83\xc4\x9d\xe3\xf4\x0b\x38\x25\x6c\x60\x3f\x3d\x82\x09\xaf\x3a\x14\xa4\x1c\x5a\x45\x01\xc1\x16\x45\xb0\xf1\xc0\x0b\x7c\xfd\xf2\x6f\x0f\xfd\xeb\x97\xff\xbc\x7e\x98\xa0\x7e\x2c\x0a\x7b\x84\x3e\x34\x46\x73\x68\x28\x4d\x10\x5c\xc8\xb4\x4f\xf8\x20\x71\x3b\x13\xd2\x1f\x43\xbc\x3b\x08\x8f\xb9\x59\x15\x44\x59\x31\xeb\xd3\x02\x5a\xd9\x7a\x52\x23\x
0b\x71\x2e\xb5\xd0\x87\x03\x16\x38\x76\x14\x3b\xe8\xc3\x04\x82\x65\x4c\x5a\x35\x63\xdb\xc7\x1c\x46\xed\x58\xe8\x33\x36\x8b\x88\xc6\xaa\x5c\xc1\xc2\xa3\xcc\x5a\x59\xf4\xb4\x1f\x15\x32\x2b\x24\xea\x49\x3d\x17\x57\x7e\x8e\x69\xc6\x36\x15\xcc\x85\x94\xee\x71\x6b\x54\x0c\x46\x54\xe4\xdc\x52\x83\xb9\xd6\x90\x09\xac\xb2\xa0\x98\x12\x46\x1d\x8d\x42\xe1\x01\x45\xa7\x9f\x66\xc4\xcf\x16\xc0\x30\xee\x13\x45\x20\x5b\xf4\xe2\x62\xbb\xc2\x15\x32\xb3\xff\x93\x5f\x6c\xc5\xe7\x42\xf8\x81\x05\x58\x3b\x14\xfb\x2b\xe4\x68\xf0\x12\x85\x3d\x25\x72\x3d\x51\x8e\xa3\xf3\x6d\x82\x5b\x12\x67\x21\x8e\xb1\xf3\x78\xb1\xd4\xe2\xf8\x75\x11\xf9\x4a\xd4\xd6\x01\xb5\x9c\x12\x1f\x2d\x6b\x5e\xf2\x64\xc0\x8a\x97\xcf\x74\xb5\x71\x8b\xa1\x1d\x5c\xfc\x95\xf7\x94\xd0\xb3\x35\x7a\xf1\x9b\x2f\x0c\xee\x3c\x17\xa7\xba\x18\x58\x34\xec\xd7\x75\x68\x05\xd1\xd7\x9d\x43\xc7\xd9\x48\x2a\x7d\x10\x5d\x7f\xf5\xd8\xb3\x4c\x10\x83\x34\x55\xf6\x35\xd4\x79\xcf\x7a\x87\x91\xdd\xb8\xfd\x14\x65\x09\x07\xdc\x1a\x19\x2e\xd5\x7c\xf0\x7d\x2a\x21\x97\x16\xc5\x42\x37\xf6\x76\x27\xe8\xb7\x27\xd3\x31\xac\x65\xb6\xb6\x9a\xb1\xfc\x84\x21\x6c\x21\x73\x63\x4c\x2d\xee\x55\x8b\x96\x8a\x6b\x6a\xcc\x66\x47\x0d\x09\x46\x5d\x2a\xb3\x56\xc0\x37\xee\x71\x42\xf0\x11\x13\xda\x39\x57\x17\xbe\xca\xea\x9d\x5b\x18\x1d\x7c\xff\x6c\x62\x3d\xae\x73\xc7\x34\x09\x0d\x28\x06\x17\x1c\x16\x8f\x5a\xa8\x79\xa6\xc2\x86\x6e\x2a\xe6\x5b\x30\x08\xf6\x64\x32\x80\x05\x87\x93\xcd\x58\xe3\x99\xc9\xab\x75\x56\x6b\xea\x01\x65\x5e\xfb\x73\x67\x33\x1d\x5d\x7c\xc0\x38\x0a\xe9\x04\xb7\xdf\xf6\x86\x39\xd3\x7f\x76\xb0\x86\x7e\x56\xcd\xf5\x1a\xdc\xbb\x25\xb8\x8b\x35\x3b\xd7\x61\xf4\xdc\x2f\x7e\x84\x9f\x50\x22\x19\xdf\xc7\x0e\xf3\xf7\xf2\x63\x42\x4e\x21\x62\x63\x54\xb9\x42\x28\xc3\x98\xcf\xdc\xb2\x0c\x21\x62\xa9\xb5\x10\x0a\xdc\x63\x47\x31\x99\xee\xe6\x0a\xad\xcf\xb7\xd0\xb1\x62\x02\x61\xee\x5d\x93\xe8\x17\xf9\xd9\x65\xeb\xce\x80\x82\x39\x22\x44\xcc\x55\xd4\x27\xa9\x8e\x79\x10\x56\x8c\x66\x33\x41\x30\x94\x17\x58\x59\xc8\x58\xe3\xff\xd5\xa2\xac\x5a\x78\x40\xb6\x5d\xf1\x7c\xf0\xdb\x
b9\xa3\x79\xd6\x34\xdc\xa1\xd5\x10\xce\x42\x3d\x53\x34\xd7\xb6\x56\xfb\x29\xbc\xc2\xdd\x61\x07\x0d\x95\x21\x85\xc9\xf2\x56\x9f\x96\x28\x88\xf9\x35\xc4\x90\x4d\x11\xfb\xda\x0f\xac\xd7\x3c\x30\xde\xda\xa5\x0b\x50\x7b\x7e\x26\x15\xd8\xa3\x61\x73\x06\x6a\x4b\xb3\x09\xe1\xc5\xf1\x80\xa5\xaa\xd8\x5b\xdf\x93\x2b\x1f\x16\xf2\xed\x79\x76\x16\xb5\x8c\x83\x4d\x45\x8d\x67\x6b\x08\x1a\xbb\xd9\x27\xdc\xed\x83\x62\x51\x28\x8b\x5e\xeb\xda\x62\x51\x07\x85\x84\xa1\x28\x04\xe8\x39\x6b\x97\x26\xd8\x87\x42\xe5\x7b\xb7\x75\xe1\x1e\x01\x73\x33\x30\x65\x85\x99\x75\x77\x0d\x6e\xf5\x68\xd9\xaf\xe9\xed\x43\xf2\x5f\xc5\xd2\x89\xf2\x1a\x28\x17\x0d\xae\x47\x43\xb9\x22\x56\xef\xaf\x4a\xfd\xb9\x1a\x6e\xe6\x1c\xbf\x68\x0e\xdc\xb6\x97\x8b\x98\x4c\x78\x66\xfd\x03\xe7\xda\xbb\x2d\x03\x82\x87\x31\x05\x81\x7d\x88\x77\xe3\xe0\x6d\xef\x3c\xc1\x4b\x97\x73\xc5\x08\x9a\x62\xcb\xa2\x87\x13\xbb\x27\x60\xf3\x88\xfb\x12\xa9\x0b\x2f\x0d\x67\x3b\x47\x78\x3c\x74\xe6\xb2\x98\xa3\x4c\xc3\xc2\x94\x89\xca\x5a\x6d\xf4\x4c\xfc\xfd\xe6\x6f\xeb\x58\xc9\xb9\xea\xa1\x1f\x93\xd2\x65\xeb\x53\xd5\xf9\x72\x5b\x80\xd9\x7a\x4c\xb3\x83\xb7\xeb\x60\x57\xc7\xd4\xb3\x1a\xaa\x83\xcc\x0a\x67\x10\xae\xa3\xb4\xd9\x8a\x9d\xef\x77\x0a\xce\x0d\xbf\x9c\x46\x8d\x27\xfa\xf3\xa9\x9a\x14\x63\x97\x39\xf1\x61\x02\xc5\xd0\xef\xfc\x98\x3b\x9c\x4e\x17\x09\xeb\xc2\x6e\x15\x98\x1e\xad\x48\x06\x14\x98\x30\x88\xe9\xea\x9e\xe7\x38\xb8\x7d\xbc\x7f\x76\x63\xb7\x32\x83\xf3\x73\xdb\xee\x39\x88\x4f\x35\xd5\x84\x77\xb0\x72\xf2\xfe\xbc\x75\x29\x4b\xad\xe8\xef\x88\x66\xf5\xd8\x95\x8d\xaa\xb3\x2e\x48\x03\x8d\xd0\x3d\x42\x3b\xa6\x64\x0e\x71\xb7\x2c\xaa\xad\xec\xfb\xc6\xbf\x34\x96\x93\xab\xbf\x5b\x27\xe4\xe5\xce\x32\x60\xa4\x76\x9e\xa4\xcf\x5c\xc5\xab\x75\xde\x6f\x51\x1f\x85\x1c\xf5\xc2\x08\x09\x74\x95\xda\x3e\xe4\x70\x40\x81\x93\x28\x7f\xce\x69\x9a\x95\xea\x5c\x85\x52\xe8\x60\x1c\xdb\x5b\x84\x78\x83\x40\xe9\x49\xeb\x0c\xe8\xed\x6d\xd6\x87\x75\xf8\x97\x3c\x76\x07\xbf\x9c\xdb\x5f\x55\xda\xab\xa5\x99\xb8\xfd\x17\x48\x74\x6f\x9b\x66\x33\xf3\x71\x44\xe7\x59\x73\xa6\xf9\x
68\xe3\xf9\x59\x88\x65\x69\xe2\xd8\xbc\xf6\xca\x35\xcf\x7d\x84\xd3\xaf\x99\x5f\x83\x4e\x01\xff\xa9\xfe\xb1\x26\xf5\x01\x3c\xbb\x3c\xa2\xa8\x67\x59\xb1\xac\xbb\x7e\x71\xe4\xf3\xad\xfe\xe6\xb6\x80\x79\xca\x38\x7c\x44\x19\x6c\xd8\x15\x0a\x8a\x75\x32\x3e\x8c\xb2\x8a\x76\x89\x2e\xad\xf3\xd1\x3a\x9e\x3d\x7e\x3b\x7c\xef\xe3\x2d\xea\x9c\x16\x3a\x23\x7b\xc9\xd0\xee\x7f\x00\xea\x01\xcf\x9e\xfb\xfc\x1c\xfd\x68\x54\x07\xa1\x43\x67\xef\x9a\x48\x75\x86\xb6\x11\x72\xb1\xeb\x75\xb2\xee\x83\xa2\xb8\x0d\xb8\x1b\xda\xb3\x41\xb0\xb8\xcd\xe6\xff\x4b\x31\x9b\xff\x06\x00\x00\xff\xff\xfd\x98\xef\x1a\x17\x10\x00\x00")
func complySoc2PoliciesRemoteMdBytes() ([]byte, error) {
return bindataRead(
@@ -937,7 +937,7 @@ func complySoc2PoliciesRemoteMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/remote.md", size: 180, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/remote.md", size: 4119, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -957,12 +957,12 @@ func complySoc2PoliciesRetentionMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/retention.md", size: 6811, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/retention.md", size: 6811, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesRiskMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x1c\x8d\x41\x0a\xc2\x30\x10\x45\xf7\x73\x8a\x0f\xae\x23\xd6\x95\x66\x57\xb2\xaa\xab\xd2\x7a\x81\x90\x46\x19\x6d\x66\xa0\x93\x0a\xbd\xbd\xb4\xab\x0f\x9f\xf7\x78\x12\x4b\xf6\x18\xd8\xbe\x68\xcd\xb2\x59\xc9\x52\xd1\xeb\xcc\x69\xa3\x98\x16\x95\xad\x78\x0c\x5d\xdb\x93\xc5\xca\xf6\xe2\x6c\x9e\x80\xe7\x18\xf6\x01\x1c\x42\xb8\x9f\x1b\x2a\xf1\xa3\xcb\x90\x7f\x6c\xac\x72\x20\x0e\x53\xac\xd9\xe3\xb1\x0a\x1a\x5c\x2f\xcd\xed\x10\x92\x96\xbd\xe1\xd1\x09\x57\x8e\x33\x26\x4d\xeb\xfe\x90\x73\x8e\xe8\x84\xa0\x85\xe5\x8d\x51\x55\xfe\x01\x00\x00\xff\xff\x7a\xe5\x35\x49\x9e\x00\x00\x00")
var _complySoc2PoliciesRiskMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\xcd\x92\x1b\xb7\x11\xbe\xf3\x29\xba\x2a\x55\x89\xbc\xa6\xc6\x96\xed\x43\xb2\xaa\x1c\x36\x8a\x53\x51\x4a\xb1\x5d\xab\xcd\x03\x80\x98\x1e\x0e\x2c\x0c\x30\x46\x63\xc8\xa5\x4b\x87\xbc\x46\x5e\x2f\x4f\x92\xea\x06\xe6\x97\xdc\x15\xe9\xad\x52\x2e\xcb\x8b\xad\x59\x00\xfd\xf7\xe1\xeb\x06\x1a\x4e\x35\x78\x0d\xb7\x86\x3e\xc0\x0d\x11\x12\x35\xe8\x22\xfc\xe4\xad\xd1\x87\x95\xd2\xc1\xbb\x43\x73\x0d\xb7\x6f\x6f\x7e\x5a\x91\x8a\x86\x2a\x83\x74\xbd\x02\xb8\x7b\xff\x86\xff\x03\xf0\x12\xde\xbc\xf9\x53\xf1\x6a\xd5\xa8\x9f\x7d\xb8\xc5\x9d\x21\xe3\x9d\x0c\x79\x09\xa5\x8a\x78\x0d\xff\xe8\x1c\xbc\x82\x6f\xbe\x7e\xf5\x47\x99\xa0\x7d\xc3\x32\xae\xe1\xad\x33\xd1\x28\x0b\xa5\xd7\x1d\x7f\x59\xbd\x7c\xf9\x72\xb5\xfa\x1d\xfc\xd4\x85\xd6\x13\x82\x72\x25\xbc\xd7\xbe\xc5\xd5\x4a\x15\x70\x57\x23\xb4\xf9\x2f\xbe\x82\x58\x1b\x82\x56\xd4\x04\x43\x10\x3d\x94\x58\x19\x87\x10\x6b\x84\x06\x63\xed\x4b\x6f\xfd\xf6\x00\x95\x0f\xf2\x4d\x8d\xd6\xf1\xc2\x31\xa0\x8a\xf2\x2f\x5f\x81\x71\x95\x0f\x8d\x8a\xc6\x3b\x20\xd4\x5d\x30\xf1\x00\xc1\xd0\x07\x82\xbd\x89\xb5\x71\xb2\x82\x0f\x5b\xe5\xcc\xaf\x32\x6c\x9d\x16\x99\x49\x55\x5a\x63\x1b\xd5\xc6\x22\x58\xdc\xa1\xe5\x95\x79\x11\x50\x04\x84\x11\x36\x87\xa3\x65\xfe\xfb\xef\xff\x10\x58\x54\x25\x06\xaa\x4d\x5b\x88\xa5\xb7\x69\xce\x4c\x5d\x59\x67\xd4\x59\x05\x04\xd5\xb6\xd6\xa0\x28\xc1\xcb\xa2\x8b\x26\x20\x10\x3b\x2c\x39\xe8\x84\xa8\x93\x86\xb6\xc1\x6f\x83\x6a\x06\x93\x94\xb5\x22\x3e\x12\xec\x6b\xa3\x6b\x91\xd6\x11\x96\x0f\x39\x03\x7c\xc8\x23\xb5\xef\x6c\x09\xb5\xda\x71\xf4\xc0\x34\xad\xd2\x11\xbc\x3b\x2d\x37\xaf\x66\x62\x91\x03\x3c\x46\x34\xd9\x46\xbd\x36\xd8\xb4\xd6\x1f\x10\xe9\x94\x61\xb0\xaf\x3d\x44\xf5\x01\xa1\x55\x21\x82\x71\xbd\xd3\x1f\x71\x60\xc1\x38\xfb\x8b\xd2\x1f\xb6\xc1\x77\xae\x14\xf9\x37\xf0\x01\x0f\x80\x16\x7b\x54\x5c\xec\x41\x06\xa2\x82\xda\x5b\x43\xd1\x68\x11\x4b\x07\x8a\xc8\xc3\x35\xdb\x14\xbc\xd2\x35\x1b\x25\xda\x34\xca\xa9\xad\x48\x9b\x1b\x9f\x20\x45\x22\x3f\xe0\x2f\x9d
\x09\x32\x88\x64\xc1\x36\x78\x8d\x6c\xda\x00\xec\x99\x2f\xa2\x07\x53\x32\x14\xaa\xc3\x23\xa8\xce\xbb\x29\x2d\x05\xda\x3b\x32\x14\xc5\xb9\x95\xef\x82\xf8\x91\xae\xfb\x85\x8c\xce\x41\x7e\xc0\x23\x09\x2a\x6b\x46\xf9\x1e\x05\x3a\x32\x2e\xd6\xec\xec\xa4\xf4\xae\xb3\x0e\x83\xda\x18\x6b\xa2\x84\xb5\x56\x51\x62\x7c\x78\x3d\x8d\x53\x96\x60\xcd\x07\xb4\xa6\xf6\xbe\x94\xc9\xac\x1e\xfe\xd2\xa1\xd3\x08\x2f\x58\xfd\x2f\xfa\x81\x8f\x89\xd8\xa0\x71\x5b\x08\xa8\xac\xf9\x15\xcb\xf5\x29\x63\x86\xdd\xc4\x9e\x44\x8e\x4c\xe7\x26\x7b\x98\x45\xa5\x3d\x81\x3b\x65\xbb\x99\x13\x02\x92\x29\x3b\x65\x33\xd4\xaa\x88\x61\x01\xae\x5b\xac\x30\xb0\xce\x34\xee\xe9\x09\xc1\xde\x62\xeb\x43\x84\x3b\x46\xb6\x8a\x28\xb4\x97\x38\x97\x47\x5f\x2d\x86\x5f\xad\x84\x39\x4d\x8a\xdb\x12\xde\x7d\x1c\x8d\xd3\xb6\x2b\x33\x70\x4e\x05\xef\x61\x6f\xd5\x6a\xc7\xee\x62\x3e\xf3\xb2\x2f\x99\xa5\x5b\xe5\x0e\x39\xba\xc5\x4c\x81\xca\x04\x8a\x40\x11\x5b\xc8\x6c\xb0\x54\x29\x31\xf2\x80\xc4\x29\xa3\x8c\x14\x32\xa3\xaa\xc7\xf6\xd5\x6b\x16\xe3\x63\x8d\x01\xf6\x3e\x94\x8c\xb5\x25\x45\x35\xea\x00\xaa\xaa\x50\x47\x59\x4d\x7b\x57\x25\xe9\xca\x9a\x78\x58\x83\x71\x11\xb7\x41\xfe\x57\xb9\xf2\x2b\x1f\x40\xed\x94\xb1\xc9\x03\x87\x65\x0e\x38\xc1\x71\x85\x84\x23\x92\x48\xca\x9e\x1e\x32\x17\xfb\x1e\x5a\xd5\x62\x60\x2a\x44\x8b\x3a\x06\xef\x8c\x66\x64\x31\xaf\x32\x9b\xa5\x40\xd0\x9a\x93\xa2\xda\x28\x42\x5a\xcf\x64\x46\xd4\xb5\x4b\x29\x8b\xf7\x7c\xcb\xeb\xca\x88\xa0\x28\x86\x4e\xc7\x2e\x60\x86\xe3\x7d\xc4\xe0\x94\xfd\xca\x77\x91\x7c\x17\x34\x96\x40\x18\x76\x46\xe3\x82\x23\x0a\xf8\x5b\x0f\x6d\xf1\x16\xcf\x07\xbf\x77\x18\xa0\xe9\x28\xc2\x66\xc4\x09\x96\xf3\x18\x3b\xbc\xef\x43\x7c\x1c\xcb\xc7\xa0\xa4\x88\xbc\x36\x2a\xe6\x74\x31\x91\xce\x0b\x3f\x3c\xaf\x57\x88\xc9\x13\x4b\x76\xa8\x3a\x82\x95\x6c\xcc\x02\xbe\x1f\x96\x94\x68\x6c\xf0\x48\x68\xd3\xd9\x68\x5a\x3b\x50\x44\x76\x9c\xb0\xaf\x7c\xf9\xe4\xc4\x85\x7a\x9c\x1e\x48\x35\xad\x3d\xc6\x7a\x62\x0b\xa6\xef\xe0\x77\xa6\xc4\x92\x19\x50\x52\x51\x86\xf6\x27\xf6\x3e\xbc\x08\x3d\x57\xc0\x0b\xf5\xc5\x17\x63\x1c
\x86\xe0\xf5\x54\xf4\x70\xec\x46\x62\x48\x43\x24\x95\xe7\xaa\x27\xf6\xd3\x92\xc9\xb2\xf5\x54\xc3\x5b\xae\x34\x3b\xa1\xb1\x51\xe4\x8f\xac\xc5\xb8\x0e\x49\xee\x1f\xe5\xac\x79\xf6\x21\x29\x90\x5c\x70\x9d\xa6\xf2\xef\x55\x01\x6f\x46\xae\xa6\x91\x56\xb5\x6f\x36\xc6\x9d\x45\x44\x3c\x87\x2b\x87\x41\xb5\x6c\x81\xa9\x80\x3a\x8e\x7a\x9f\x3a\x23\x06\x23\xdc\x4e\x05\xcc\x54\x78\x37\x66\x0f\x5f\x81\xd7\xba\x0b\xc9\xb7\x7e\xbe\xc4\x0b\x53\x60\x21\xce\x68\x83\xdf\xf4\x44\x90\x32\x53\x0f\x92\xbd\xe1\xe2\xe3\xbe\xb5\xde\x24\x5e\x99\xaa\x7b\x98\x24\x83\x16\x75\x34\xbb\xec\xee\x3e\x82\xbd\x4f\x82\x11\x65\xc5\xb6\x12\x23\x86\xc6\x38\x66\xdb\x69\x62\x63\x5f\x4c\xf3\x5e\xc0\x5c\x06\xc8\x4e\xb8\x63\x88\x11\x7c\x2b\xc3\xbe\x2b\x64\xf5\xa3\x8c\x90\x6a\x4e\x43\xa0\x95\xd5\x9d\x15\x4c\x6f\x0e\xa0\xca\x52\xa8\x3d\xb1\xe2\x20\x90\xb4\x0f\x38\xc0\x64\x22\x5a\xfe\x50\xac\x56\x5f\xbe\x5c\xfe\xce\xf9\x72\xd1\xef\xcb\xd5\x47\xb8\xba\x9a\x60\xe6\xea\x0a\x4e\x7f\xf9\x2b\x92\x0e\xa6\x65\x04\x5d\x5d\xc1\x45\xbf\x8f\x20\x52\xde\xb1\x77\x86\xb9\xfc\xe5\x3d\x1b\x3a\xf9\xf2\xa4\xdf\xc7\xd5\x97\x7f\x5e\xfe\xce\xf9\x72\xd1\x8f\xfd\xf5\xce\xef\x17\xe6\x7d\x7d\x64\xf0\x3b\x4f\x52\xcc\x3d\x96\x05\x97\x19\x50\xa0\xee\x7c\xcc\x8e\x58\x1d\xfb\xe3\xd4\x97\x49\xca\x9d\x26\xcb\x3f\x30\x08\xa9\x86\xca\xfa\xfd\x1a\x2c\x6e\x95\x15\x89\xda\xbb\x18\x94\x8e\xbc\xaf\x1f\x70\xf9\x99\x82\xfd\xc6\x9a\x6d\x9f\x51\x7d\x80\x80\x6d\x17\x73\xa2\x3e\x27\x58\x9f\x07\xdc\xe7\x58\xf2\xa4\xdf\x47\x80\xcf\x65\xcb\x3f\x7d\x89\x81\xd3\xd5\x20\xfa\xd5\x91\x32\xbf\x05\x78\xb9\x9e\xea\xc2\x45\xc0\xab\x8c\x53\x4e\x1b\x65\x41\x7b\x4a\x27\xbc\x5a\x11\x58\xbf\x67\x01\x4d\xaf\xeb\x78\x02\x65\x88\x3e\xea\xf4\x73\x81\x37\x87\xb9\x60\x7b\x09\xed\x09\x38\xfb\x6a\x33\x4f\x3f\x53\xc8\x45\x68\x3e\x9a\xfe\x8c\xee\xcb\x6d\xf9\xbb\xd9\xd6\x73\xd1\xdf\x1c\x29\xf3\x9b\x69\xb5\x56\x3b\xbc\x08\xdd\xa6\x69\xb0\xe4\xaa\x54\x80\xed\xc3\x57\x72\x44\x67\x4c\x4b\xa9\x39\x03\xf5\x02\x90\x4f\x43\xf7\x84\xb5\x7d\xcb\x5b\x28\x11\x6c\x42\x79\x3e\x8c\x9f\x82\xf9\x9a\x61\x7e
\x2e\xba\x2f\xa5\xeb\xf9\xf4\x67\x74\x5f\x6a\xcb\x4a\xca\x47\xf8\xf6\x1a\x26\x65\x14\x03\x79\x52\x67\x81\xd4\x47\xa9\x26\xef\x0b\xd6\xcf\x58\x03\x8e\x45\x3b\x17\x64\xa7\xbf\x3c\xd7\x80\x97\xd4\x80\xdf\x1b\xb9\x2f\xc1\x7b\x43\x91\xeb\xff\xe1\x4e\x45\x76\xb0\xb7\xe9\x64\x47\x31\x78\xb7\xcd\xe9\x73\x87\x40\x1e\x2a\x15\x2e\xab\x01\xc7\x63\xaf\x03\x55\xe2\x2f\x1d\xf3\xd6\x70\x01\xde\x06\x1f\xf9\x70\xe4\x9d\x90\xe3\xf2\xbc\xf5\xa4\x54\x5c\x8d\x77\x4f\xf3\xeb\x3e\x3e\x05\xe1\x7d\x0c\xd8\xa0\x3d\x70\x51\x50\xc0\x0f\x1e\x1c\xee\xb9\xd2\x10\xea\xa6\x73\xc9\x8a\xbd\x84\xf7\x7c\xbe\x4b\x47\x31\x96\x58\x75\xb1\x0b\xf8\x5c\x68\xfe\xff\x0a\xcd\x4f\xa3\x5b\xe0\xdc\x70\x6d\x38\xc5\xe7\xc4\x15\xe7\xc6\xff\x11\x44\x9f\x02\xf4\x14\x94\x4f\x42\xf7\x31\xa2\xfb\xaa\xb6\x80\xf7\xbe\x41\x68\x8c\xf3\x61\x04\xb4\x14\xd2\x62\xf5\x05\x85\xa6\x5c\x8f\x60\x59\xc0\x0f\xd3\xbd\x21\xa0\x6f\x3d\x91\xd9\x58\x5c\xc3\xa6\x8b\x72\x32\xac\xcd\xb6\xe6\xed\xc4\xcc\x7c\x28\x9e\xd1\xfd\x9b\x6c\x39\xa7\xd0\x3c\x8f\xbb\x39\x24\xc6\x41\x6b\x95\xe6\x2a\x70\x98\x7e\x6e\x8d\xe9\x50\x0e\xef\x66\x87\xaf\x19\xb2\x01\x73\xfb\x8a\x15\x7c\x08\xd1\x09\x94\x4f\x03\x76\x0f\xe9\x02\xde\x0e\x80\x4b\x1d\xc3\x24\xdb\x1e\x5f\xe0\xb1\xd0\xc4\xbd\xe7\x5a\x77\x36\x45\x4f\x67\x3d\xe3\xf9\x52\x5b\x72\x69\xf9\xdd\x51\x69\x39\xb9\x85\x3d\x59\x59\xfe\xde\x6d\xa8\x7d\xbd\x5a\x6d\x86\x66\x57\x6a\xbc\x71\x29\xda\x8f\x1a\xbb\x5e\x32\x62\xa7\x6c\x87\x04\x5f\x43\xac\x83\xef\xb6\x35\x7c\x23\xdb\xa0\x3f\x1a\xa5\x4e\xf8\x66\xd6\x86\x4f\xcd\xce\x93\xcb\xe4\xbb\xd4\xd3\x4b\x1c\xf5\x01\xa9\x80\x7f\x1d\x7d\x1b\xee\xe1\xa5\xf3\x27\x0d\x14\xdd\x9b\x73\xd7\x37\x03\x17\x46\x8c\x4d\x47\x43\x7c\x90\x4b\xdd\x66\x16\x9c\x8d\x1a\x1a\x06\xc3\x0a\xe9\xf6\xb7\x80\x1b\x6b\xb3\xdc\x2a\xf8\xe6\x64\x67\x21\x45\xa3\x57\x4b\xfb\x76\xf2\x3e\xe0\xd4\xa2\xe9\x4a\xda\x50\xeb\xc9\xe4\x57\x0d\x96\xeb\x41\x69\x85\x4c\x5e\x48\xb4\xc3\x5d\xc6\xbc\xf3\x39\xe9\x8a\x9c\x5c\x7e\xda\x14\x31\x0e\x66\x7d\x8e\x31\x2c\x37\xd3\x76\x89\xa1\xe5\x5b\x87\xdc\xcd\x92\xd6\x03\xbc\xf9\xfe\xc7
\xfe\x4a\x25\x35\x04\xfb\x3e\x65\x6a\xa6\x07\x02\xaa\x95\xb5\xc3\x3d\x3b\x82\xdf\xfc\x9c\x58\x2e\x35\x17\x1a\x13\xe5\xd0\xea\xb6\x92\xbd\x59\x8a\xe4\xd9\x14\x64\xf6\xf1\x71\xf0\x8f\x02\x2d\x9d\x19\x26\x63\xe3\x3a\xdf\x49\x20\x83\xdf\xa5\x87\x03\xf9\x8d\x0a\xad\x8f\x55\x93\x26\xa5\x25\xcf\x0e\x95\xb0\x48\x93\x21\xd9\x91\x23\xeb\xc3\xa2\xf3\xba\x06\xdc\xa1\x03\x23\x24\x6c\xd2\xb8\x7c\x79\x6f\x08\x4a\xc4\xe6\x04\xf0\x27\xad\xbc\xa3\x20\xb2\x88\x24\xac\xef\x62\x4a\x49\xeb\xad\xf5\x7b\xf1\x4a\x1a\x37\xef\xeb\xbc\x97\x9e\x66\x2e\x79\x4a\xde\xd0\xbe\xed\x5b\xf6\xcb\xec\xf4\x82\x16\x0d\x90\xbb\xa0\x1c\x55\x18\x42\xdf\x87\x48\xe2\xa3\x97\x46\x8b\x09\xa5\x84\xff\xf0\x3a\x35\x8c\xee\x05\x4f\x6b\xd8\x1c\xd8\x95\xba\x56\x64\xe4\x84\x02\xc6\x51\x17\x84\x20\xf2\x3b\x09\x1f\x80\xcc\x56\x5a\x29\x6a\xb8\x98\x48\xd8\xa5\x4e\x1e\x90\x04\xe2\x41\xbc\xba\xc3\x40\x73\xad\x6e\x76\xde\x94\x53\x8d\x58\x62\x69\x28\x87\xb5\xff\xcb\xa6\x23\xe3\x90\x08\x14\x83\x68\x68\x11\x69\xd5\x11\x52\x6a\x28\xc9\x4e\x98\xaf\x2d\x91\x98\x2e\xfe\x3a\x41\x3b\x39\x57\xf6\x05\xc3\x33\xf2\xce\xf7\xce\x1e\x72\x7c\x81\x46\x3f\x57\x13\x60\x9c\xd8\x8b\x7b\x79\x69\x23\x97\x9f\x0d\x83\x21\xd6\x2a\x1d\x4e\x5a\x1f\xd3\xd5\xd4\x70\x4f\xf4\xe0\x31\x69\xb2\x05\xe5\x05\x43\x96\x2e\xfe\x5c\x4a\x5c\x8f\x8b\xa4\x6e\x22\xd5\xa2\x01\x52\x34\x0d\xd7\xc7\x51\x9a\xc6\xfb\xc7\xba\x5a\x99\x7a\xf3\x73\x09\xd6\xd5\x2a\xe7\xb0\x9c\xd7\x35\x13\x56\x2c\xf2\x6b\x08\xdc\x76\x56\x05\xb8\xc5\x9d\xc1\xbd\xdc\xc1\x2d\x99\x8f\x25\x3d\x44\xbc\x77\x0f\x32\xe5\xf1\xac\x05\x85\x76\x6d\x99\x7a\xc3\x35\x3a\xb6\x8e\x23\x35\x34\x43\x33\x8e\xe7\x1d\xd2\x02\x6e\x22\x28\x2e\xce\x4d\xd3\x35\xeb\x14\xf6\xb4\x4c\xe6\x4f\xb6\x21\xb3\x94\x90\xb4\x2b\x3b\x9d\x70\xc0\xd0\xc6\x00\x07\x54\xa1\x80\xb7\x92\x22\x72\xcd\x1d\x30\xbd\x9a\x2b\x25\x53\xa8\x78\x92\xfd\x1f\xb4\x66\x69\x88\xec\x9a\xca\x68\xe5\x22\xe8\x5a\xb9\x2d\x52\xaa\xb3\xfa\x5c\x31\x7f\xe9\x36\xbe\x4c\x58\x8f\xdb\x61\xa4\x55\x39\xd7\x0f\xdf\xd1\xed\x4c\xf0\x2e\xbf\x83\x49\xd1\x6b\x7d\x60\x54\x2d\x5e\xb1\x20
\x75\x36\x3d\x3a\xfa\xf4\x7b\xad\xd4\xbf\x67\x97\x51\xb7\x49\xf0\x8a\xd9\x95\xb4\x1e\x9d\xd9\xbf\xc7\xe8\xdf\x0f\x9c\xee\xbc\x17\xab\xd5\xff\x02\x00\x00\xff\xff\x01\x6b\x63\xbb\xf6\x28\x00\x00")
func complySoc2PoliciesRiskMdBytes() ([]byte, error) {
return bindataRead(
@@ -977,12 +977,12 @@ func complySoc2PoliciesRiskMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/risk.md", size: 158, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/risk.md", size: 10486, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2PoliciesVendorMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x1c\x8d\xc1\xaa\x83\x30\x10\x45\xf7\xf9\x8a\x0b\x6f\x9d\x47\x75\xd5\x66\x9b\x55\x0b\x82\xd4\xe2\x7e\x88\x53\x99\x62\x66\xc0\xc4\x82\x7f\x5f\x74\x75\xe1\x72\x0e\x47\x29\x73\xc0\xc8\x3a\xd9\x8a\x8e\x94\x66\xce\xac\x15\xbd\x2d\x92\x76\x47\x69\x35\xdd\x73\xc0\xd8\xf5\xae\x50\x95\xf2\x16\x2e\xc1\x01\xaf\x21\x1e\x03\x78\xc4\x78\xfb\x6f\x5d\xa6\x8f\xad\x4f\xfe\x4a\x11\xd3\x13\xf1\x98\xa8\x72\xc0\x63\x53\x34\x68\x2f\xcd\xf5\x14\x92\xe5\x23\x11\x70\x57\xa9\x42\x0b\x26\x4b\xdb\xf1\x38\xef\xbd\x73\x7f\x88\x96\x45\x67\x0c\x66\xfa\x0b\x00\x00\xff\xff\x8a\x18\x1b\xc5\x9f\x00\x00\x00")
var _complySoc2PoliciesVendorMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x56\xcd\x6e\x1b\x47\x0c\xbe\xeb\x29\x08\xe4\x62\x03\xb2\xd0\xe4\xd4\xfa\xe6\xe4\xa4\x22\x01\x8c\x58\xc8\x9d\x9a\xa1\x76\xd9\xcc\xce\x6c\xc9\x59\x19\xdb\x53\x5f\xa3\xaf\xd7\x27\x29\x38\xb3\x2b\xad\x63\xc9\xe9\xc9\xf2\xce\x0f\x3f\x7e\xfc\xf8\x71\x22\x76\x74\x0f\xdf\x28\xfa\x24\xf0\x05\x23\x36\xd4\x51\xcc\xf0\x98\x02\xbb\x71\x85\x4e\x52\x1c\xbb\x7b\xf8\xf6\xe5\x71\xa5\x98\x59\x0f\x4c\x7a\xbf\x02\xd8\x3d\x7d\xb2\x3f\x00\x77\xf0\xe9\xd3\x6f\x9b\x0f\xab\x0e\xff\x48\xf2\x95\x8e\xac\x9c\x62\xd9\x72\x07\x1e\x33\xdd\xc3\xef\x43\x84\xf7\xf0\xe1\x97\xf7\xbf\x96\x03\x2e\x75\x16\xe2\x1e\xb6\x91\x33\x63\x00\x9f\xdc\x60\x5f\x56\x77\x77\x77\xab\xd5\x3b\x78\x1c\xa4\x4f\x4a\x80\xd1\xc3\x93\x4b\x3d\xad\x56\xb8\x81\x5d\xcb\x0a\x7d\x81\x05\x9e\x0e\x1c\x49\x21\xb7\x04\x32\x04\x52\x38\x24\x01\xa1\x80\xd9\x82\xb7\xdc\x2b\x3c\x73\x6e\xcb\x86\x24\x0d\x46\xfe\xab\x2c\xfd\xfb\xf7\x3f\x0a\xdb\x78\x48\xd2\x95\xff\x61\x47\xae\x8d\x29\xa4\x66\x84\x9b\xed\xee\x16\x8e\x85\x09\x2d\xb1\x7b\x94\x1c\x49\x74\x03\xaf\x00\x60\xdf\x07\x36\x00\x09\x30\x04\xd8\xee\x2e\x1e\x84\xe7\x36\x41\x8b\x47\x2a\x38\x70\xcf\x81\xf3\x68\x67\xb8\xeb\xd1\xe5\xf2\xd5\xa5\x78\x60\x4f\xd1\x98\xe0\x3c\xae\x81\x63\xa6\x46\xca\x4f\xbb\x0b\x8f\xc8\x61\x3e\x9a\x0e\x97\x33\xca\xe7\x2c\xec\x8c\x52\x54\xce\x7c\x24\xe0\x73\xaa\x6b\x48\x52\x00\xa1\x50\x21\x87\x63\xb9\x4c\x8d\xe1\xab\x37\x2f\x2e\x00\x25\x37\x18\x30\xe8\x25\x35\x82\xdd\xe6\x67\xb4\x50\xd7\x87\x34\x12\x55\x56\x5c\x8a\x59\xd0\x65\x63\x29\xb7\x98\x0b\x10\x21\xed\x53\x54\xde\x07\x2a\x35\x34\x10\xdd\x59\x86\x76\x2e\x1d\x49\x94\x9b\x36\x1b\xc8\x6b\x4c\x5f\xc0\x6f\x65\x7b\x07\x1f\xd1\x7d\x6f\x24\x0d\xd1\x4f\x68\xa9\x5c\x68\xf0\x4e\xf9\x5c\x38\x0c\xac\xd0\x72\xd3\x06\x13\x5b\x4f\xd1\x2a\x04\x69\x62\x6c\x71\x8e\xb3\x9e\x12\x1b\x30\x5c\xd2\xa0\x6d\xd9\xee\x40\x87\x42\xce\x2b\x6d\x5d\xd2\x35\x08\xfd\x39\xb0\x14\x0e\xaa\xb6\xe9\x70\x20\x57\x4a\xfa\x16\x3b\x3a\xb8\xf6\x4a\x20\x38\x48\xea\x00\xe3\x95\x
8a\x92\x68\x5f\x03\x54\x8e\x26\x40\xbd\x90\x3a\xe1\x3d\x29\x74\x1c\xb9\x1b\x3a\xd0\x8c\xd1\xa3\x78\x05\x9c\x4a\x01\xdd\xa0\x19\x3a\xa2\xfc\x76\x90\x72\xb2\x4f\x1c\xb3\xa9\xdc\x85\xc1\x73\x6c\xce\xcb\x2e\xe0\xa0\xa4\x6b\x10\xd6\xef\x80\xaa\xa4\x5a\x08\x58\x83\x92\x1c\xd9\x11\x04\x3a\x52\x00\x6c\x84\x68\x5a\xb1\x14\x39\xba\xd2\x41\x0b\x6a\x36\x56\xfa\xaf\x74\x20\xa1\xe8\x48\x4b\xe9\x97\x6d\xff\x34\x07\x9d\x7c\xce\xd6\x4f\xdf\xb6\xf3\x7d\x5f\xab\x38\xe9\xb4\xeb\xdd\x72\xff\x52\x8a\x42\xd6\x14\x2d\xef\x39\x93\x9f\x58\x70\x8e\x54\x2d\xc3\xff\xdf\x58\x96\x74\x56\x18\x62\xe6\x00\x80\x27\x65\x95\x1f\xc8\xf1\x25\x5f\xb6\x98\x82\x9a\x54\x0b\x25\xbe\xf4\x9d\xf5\x3f\x37\x91\x3c\xec\xc7\xea\x3b\x7d\x2f\xa9\x17\xc6\x4c\x45\x0d\x4c\x5a\x3b\xf7\xe1\xa5\x73\x95\x2a\xba\xd4\xf5\x61\x3c\x9b\xe7\x59\x21\x96\xb8\xf5\x76\xd5\xa8\x2f\x81\x3c\x09\x1f\xe7\x84\x6d\xfb\x1b\x1c\xc3\x8d\xcc\xf5\x80\x1b\xbc\xbd\x9d\x5c\xf5\x61\xd9\x88\x73\x25\xd5\xb0\x2f\xa0\x25\x39\xeb\xb8\xc0\xdc\xd3\x69\x6a\x90\x09\xc0\xd8\x4e\xe2\xd1\x2e\xbf\x6e\xfc\x3f\xad\xf0\x0b\x8c\xfb\xdb\xdb\x2a\xaf\xae\x23\x6f\xec\x85\xd1\x5a\xf1\x19\xc5\x57\xaa\xaf\x26\x5c\xe7\xa8\xc0\xcd\xf6\xe9\xcb\xed\x69\x7a\xfc\x60\x30\x25\x0f\xf4\x2d\x09\xcd\x97\x65\x92\xae\x58\x99\xb9\xd3\xd3\xa4\xf9\xcf\x45\xf3\x0f\x27\xcd\xc3\xcd\xd3\xe7\x07\xbd\x05\x4b\x5d\x4a\xf2\x39\xd5\x9c\xcf\x84\x6d\xe0\x41\xa7\xdb\x4c\x9a\x43\x6f\x83\xd8\x4f\x03\x45\x21\xd2\x33\x24\xf3\x19\x5b\x5c\xde\xb3\x7e\xed\x83\x05\x26\x77\x7d\x98\x2d\x67\x04\xd7\x62\x6c\xa8\x54\xe5\xa4\xc1\x48\x34\xb1\x42\x51\x07\x21\xe0\x0c\x42\x1d\x72\x34\xa9\x57\x59\xb1\x95\x67\xa2\xe3\x23\x1d\xd2\x1c\xdc\x54\x5d\xb2\x58\x08\xde\x90\x36\x93\xe2\x6b\x27\xcd\x24\xf5\x28\xc5\x8b\xdf\x6c\xa8\x51\x33\x75\xba\x7e\x25\xef\x21\x7a\x92\xc6\x22\xfd\x60\x32\x06\xcb\x1e\x27\xbc\xf0\x01\xdb\xa2\xd5\xd1\x6b\x6a\xd7\x86\xcf\x2c\xc9\x3a\xc7\x0f\x4c\x1e\xfc\x20\x73\xeb\xff\x10\xc9\x8c\xc2\xf2\xd9\x40\x09\x38\x47\xdd\x5d\xd8\x59\xb9\xaf\x97\x5e\x80\xb3\xcc\xb8\x0e\xd8\xae\x1b\x22\xbb\xfa\xe5\xfc\x26\x58\x5b\x
cd\x9f\x29\x04\xfb\x7b\x3d\xa9\x3a\x38\x4a\x79\x39\xea\xba\x46\x30\x97\x26\xd0\x61\x7f\x77\x1a\x2b\x53\x05\xdf\x24\x63\x12\x41\x19\xf3\xcb\x4a\x95\x09\x69\x9d\x7a\xf2\xcc\x4c\xae\x48\x53\xf1\x40\xcd\x50\x7a\xab\xea\xd4\xb3\xda\x2b\xd0\x97\xb9\x66\x25\xa1\x30\x5e\x68\x23\xcd\xc2\x2e\x87\x71\x6a\xa5\xd3\xdb\xa3\x3c\x45\x1c\xda\xbb\x22\x50\x83\x61\x0d\x42\xcd\x10\x30\x27\x19\x5f\x3c\x47\xea\xd4\x5e\x8c\x5b\xa1\x06\xc5\xcf\xd5\x73\x29\x04\x1b\x8c\xf6\x80\x9a\x2a\xc7\xb1\xa9\x10\xb3\x60\xd4\x8e\xd5\x5e\xbc\x15\xe6\xfc\xee\xf2\x98\xb1\x4e\x63\x54\x78\x24\x51\x4b\x3d\x8c\x77\xdb\x49\x21\x05\xd7\xd2\x3b\x6e\x1e\xb7\xdb\xeb\x56\x81\x56\x95\x64\x6f\x62\xcb\x6e\xf0\x9c\xaf\xd2\x7f\xee\xc0\x73\xcf\x55\x7b\x58\x30\xf2\xca\xd5\x5f\x48\xe4\xe7\x7c\xa5\x7d\xe0\xa6\xbe\x73\x36\xab\xff\x02\x00\x00\xff\xff\x5f\xf9\x43\x4e\x43\x0c\x00\x00")
func complySoc2PoliciesVendorMdBytes() ([]byte, error) {
return bindataRead(
@@ -997,7 +997,7 @@ func complySoc2PoliciesVendorMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/vendor.md", size: 159, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/vendor.md", size: 3139, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -1017,7 +1017,7 @@ func complySoc2PoliciesWorkstationMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/policies/workstation.md", size: 1791, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/policies/workstation.md", size: 1791, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -1037,12 +1037,12 @@ func complySoc2ProceduresReadmeMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/procedures/README.md", size: 92, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/procedures/README.md", size: 92, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2ProceduresOffboardingMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xca\x4c\xb1\x52\x50\xca\x4f\x4b\x4b\xca\x4f\x2c\x4a\x51\xe2\xca\x4b\xcc\x4d\xb5\x52\x50\xf2\x87\x0a\x28\x84\x16\xa7\x16\x29\x71\xe9\xea\xea\x72\x71\x29\x2b\x38\xe7\xe7\x66\xe6\xa5\x2b\x04\xe7\xe7\xe7\x01\x02\x00\x00\xff\xff\x79\xa2\x7a\x63\x37\x00\x00\x00")
var _complySoc2ProceduresOffboardingMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\x8e\xb1\x6e\x2a\x31\x10\x45\xfb\xfd\x8a\xab\xed\xf7\x07\xe8\x5e\xf7\x22\x45\x42\x02\x25\x4d\x94\x62\xf0\xce\x86\x11\xb6\xc7\x99\x19\x93\xf0\xf7\x11\xb0\x05\x94\xf6\x9c\x73\x75\x64\xde\x60\xd4\x65\x39\x28\xd9\x3c\x0e\x95\x0a\x6f\x30\x6e\xd7\x0f\xbc\x39\xdb\x38\x4c\xd3\x34\x0c\x3b\x76\xcd\x67\x46\x1c\xc5\x11\x92\x4e\x1c\x38\x5c\xc0\xbf\x9c\x7a\x48\xfd\x42\x1c\x19\x8b\xe6\xac\x3f\xd7\x97\x07\x37\xdf\x0c\xc3\x84\x0f\x7c\xe2\xa5\x14\x9e\x85\x82\xf3\x05\xde\xbd\x71\x9d\xd1\x9d\x0d\x52\xb1\xdf\x6f\x57\xea\x5f\xbb\x1d\xfe\xef\x10\x6c\x45\x2a\x85\x68\x85\xf1\x77\x67\x0f\xf0\x54\x48\x32\x42\x1f\x13\x56\xf3\x55\xf5\x84\xde\x50\xa8\x76\xca\xf9\x32\x35\xd3\xb3\xb8\x68\xe5\x19\xd4\x5a\x96\x74\x1b\x73\x2c\x6a\x77\xdf\x34\x33\xd4\x6e\x19\xeb\xca\x3b\x65\x99\x29\x18\x94\x12\xbb\xc3\xf8\xac\x77\xef\xda\xc9\x94\x8e\xcf\xa1\x49\xeb\x22\x56\xee\x84\xda\x23\xff\x5c\xf9\x17\x00\x00\xff\xff\xdb\xfb\x69\xca\x66\x01\x00\x00")
func complySoc2ProceduresOffboardingMdBytes() ([]byte, error) {
return bindataRead(
@@ -1057,12 +1057,12 @@ func complySoc2ProceduresOffboardingMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/procedures/offboarding.md", size: 55, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/procedures/offboarding.md", size: 358, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2ProceduresOnboardingMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xca\x4c\xb1\x52\x50\xca\xcf\x4b\xca\x4f\x2c\x4a\x51\xe2\xca\x4b\xcc\x4d\xb5\x52\x50\xf2\x87\xf0\x15\xfc\x52\xcb\x15\x42\x8b\x53\x8b\x94\xb8\x74\x75\x75\xb9\xb8\x94\x15\x9c\xf3\x73\x33\xf3\xd2\x15\x82\xf3\xf3\xf3\x00\x01\x00\x00\xff\xff\x25\x8d\x3b\x1b\x39\x00\x00\x00")
var _complySoc2ProceduresOnboardingMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x90\xb1\x6e\xeb\x30\x0c\x45\x77\x7f\xc5\x85\xa7\xf7\x06\xff\x40\xb6\x6e\x9d\xda\x22\x40\xa7\xa2\x03\x23\xd1\x36\x51\x59\x52\x45\xda\xae\xff\xbe\x90\x93\xb4\xc9\xd4\x91\x82\x0e\xcf\xe5\x15\x7f\x40\x9b\xe2\x29\x51\xf1\x6d\x13\x69\xe2\x03\xda\xe7\xf3\x8c\x27\x5e\xf1\xaa\x5c\xda\xa6\xeb\xba\xa6\x39\xb2\xa6\xb0\x30\x6c\x14\x85\x89\xfb\x60\xc3\x69\x03\x7f\xb1\x9b\x4d\xe2\x00\x1b\x19\x7d\x0a\x21\xad\x75\x52\xe3\xac\x87\xa6\xe9\xf0\x86\x77\x3c\xe4\xcc\xd1\xe3\xf1\x08\xf2\x1e\x85\x3f\x67\x56\x03\x77\x13\x49\x80\xa5\xdb\xa5\x17\xe2\xa5\x24\x72\x26\x0b\x87\x0d\x0b\x05\xf1\x64\x8c\x92\x02\x83\x54\x65\x88\x13\x47\xc3\x2a\x36\x62\xa2\x48\x03\x17\xfc\x53\xe6\x6a\xb8\xdf\xfe\xff\x9a\xc0\x7b\xcc\xca\xa5\xda\x3c\xf7\x34\x07\xc3\x50\xd2\x9c\xd1\xa7\xb2\x67\xd7\xcc\x4e\x7a\x61\xbf\x6b\x7e\x63\x2c\xa2\x92\x22\x28\x6e\x55\x35\x53\x08\x5b\x97\xaf\xcf\xec\x41\x39\x07\x71\x64\x92\xa2\xd6\x46\x76\x1a\x00\xee\x4e\x3f\xa3\xf8\x01\x6b\x47\x2e\xc5\x5e\xca\xb4\xa3\x7f\xb7\x70\xf9\x7d\x3e\x3a\xf2\x7a\x39\x67\x24\xab\xf1\x37\x38\x8a\x20\xe7\x58\x15\x14\x6e\x4c\xec\xa1\x9b\x1a\x4f\xfa\x1d\x00\x00\xff\xff\x15\x19\xab\x13\xef\x01\x00\x00")
func complySoc2ProceduresOnboardingMdBytes() ([]byte, error) {
return bindataRead(
@@ -1077,12 +1077,12 @@ func complySoc2ProceduresOnboardingMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/procedures/onboarding.md", size: 57, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/procedures/onboarding.md", size: 495, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2ProceduresPatchMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xca\x4c\xb1\x52\x50\x2a\x48\x2c\x49\xce\x50\xe2\xca\x4b\xcc\x4d\xb5\x52\x50\x72\x2c\x28\xc8\xa9\x54\xf0\x0f\x56\x00\x0b\xa7\x16\x2b\x71\x25\x17\xe5\xe7\x59\x29\x28\x19\x28\x18\x28\x18\x2a\x68\x81\xa0\x12\x97\xae\xae\x2e\x17\x97\xb2\x82\x73\x7e\x6e\x66\x5e\xba\x42\x70\x7e\x7e\x1e\x20\x00\x00\xff\xff\xc9\x3a\x76\x1b\x4b\x00\x00\x00")
var _complySoc2ProceduresPatchMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x90\x31\x4b\x03\x41\x10\x85\xfb\xfd\x15\x8f\xb3\x0b\x9c\x68\x61\x73\x20\x92\x22\x45\x1a\x13\x14\x6c\x44\xc8\xba\x37\xb9\x5b\xdc\xec\x2c\x33\xb3\xd1\xfc\x7b\xb9\xf3\x0a\x8b\x30\xd5\x3c\xbe\x79\x8f\x37\xb1\xef\xd0\x14\x6f\x61\x6c\x5c\xf6\x27\xea\xd0\xac\x4b\x49\x17\xec\x5e\x31\xcb\xa4\x8d\x0b\xc2\xb9\x43\x73\x87\x69\xee\x1f\xb0\xc2\xaa\x71\x6d\xdb\x3a\x77\x33\x71\xfb\x89\xc3\x5e\x38\x50\x5f\x85\x9c\x7b\x21\xe5\x74\x26\xd8\x18\x15\x16\xc3\x17\x19\x3e\x2f\xa0\x1f\x0a\xd5\x62\x1e\x60\x23\xe1\xc8\x29\xf1\xf7\xb4\xa9\x51\xd1\xce\xb9\x16\xef\xf8\xc0\xbe\xa6\x34\x03\xc9\x1b\xa9\x41\x83\xc4\x62\x8a\xa3\xf0\x69\xd6\x77\x45\x21\x54\x58\xa3\xb1\x5c\x96\xab\xcd\xec\x4d\x38\x6c\x9e\xdf\x1e\xd5\xfc\x30\x19\xcf\x05\x5a\x9f\xd2\xad\x8e\x87\x05\xdc\x66\x2d\x14\x0c\x5c\xad\x54\x73\x00\xb0\x38\x88\xb0\xe8\x13\xb6\xf9\x4c\x6a\x71\xf0\x46\xf0\xb9\x87\xfc\x95\xb9\x96\x53\x84\xfb\x1a\x2c\x72\xbe\x1a\xb5\x36\xf3\x61\x44\xe2\x61\x49\x83\xf1\xff\x9f\xfc\x06\x00\x00\xff\xff\xb4\x3f\x07\x6a\x7c\x01\x00\x00")
func complySoc2ProceduresPatchMdBytes() ([]byte, error) {
return bindataRead(
@@ -1097,12 +1097,12 @@ func complySoc2ProceduresPatchMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/procedures/patch.md", size: 75, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/procedures/patch.md", size: 380, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _complySoc2ProceduresWorkstationMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xca\x4c\xb1\x52\x50\x2a\xcf\x2f\xca\x2e\x2e\x49\x2c\xc9\xcc\xcf\x53\xe2\xca\x4b\xcc\x4d\xb5\x52\x50\x72\xce\xcf\xc9\x49\x4d\x2e\x51\x08\x47\xc8\x29\xb8\xa4\x96\x24\x66\xe6\x14\x2b\x71\x25\x17\xe5\xe7\x59\x29\x28\x19\x28\x18\x28\x68\x41\xa0\x12\x97\xae\xae\x2e\x17\x97\xb2\x82\x73\x7e\x6e\x66\x5e\xba\x42\x70\x7e\x7e\x1e\x20\x00\x00\xff\xff\x20\xf0\xa9\x34\x5c\x00\x00\x00")
var _complySoc2ProceduresWorkstationMd = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x92\x41\x4f\x1b\x31\x10\x85\xef\xfe\x15\x4f\x39\x22\x82\x5a\xa9\xbd\xec\x0d\x05\x10\x51\xa9\x40\x64\x5b\x0e\x08\x09\xc7\x3b\x9b\xb8\xf1\xce\x6c\x3d\xe3\xa4\xf9\xf7\xd5\x86\x10\xd2\xaa\x95\x7a\xad\xf6\xb0\x1a\x7b\xec\xf7\xfc\xbd\x89\x4d\x85\xd1\x46\xf2\x4a\xcd\x5b\x14\x1e\x39\xf6\x1d\x55\x18\x4d\x24\x25\x0a\x86\x87\xb7\x3d\x5c\x90\xf9\x98\x74\xe4\x42\x16\xae\x30\x7a\x87\xe1\x7b\xff\x11\x1f\x70\x32\x72\xe3\xf1\xd8\xb9\x7b\x52\x49\x6b\x82\x2d\xa3\xc2\x62\x58\x91\x61\xbe\x05\xfd\xa0\x50\x2c\xf2\x02\xb6\x24\xb4\x92\x92\x6c\x86\x4a\x8d\x7a\xad\x9c\x1b\xe3\x11\x4f\x98\x11\x37\xbb\x86\x20\x5d\x57\x38\x86\x9d\xac\x62\x4e\x49\x36\xfb\x9e\x2b\xc9\xf0\xbc\x05\x75\x3e\x26\x64\xea\x53\x24\x3d\x85\x37\xf3\x61\x89\x20\x6c\xc4\x06\x93\x63\x07\xfb\xa3\x5f\x7d\x8a\x8d\x37\x42\x26\xed\x85\x95\x14\x3e\x0f\x55\xa0\xb8\xa6\x06\x6d\x96\x0e\xe4\xc3\xd2\x39\xf7\xfc\xfc\xec\x6a\xa9\x70\x41\xba\x32\xe9\xa1\xa5\xef\x25\x9b\x9b\x95\xf9\x37\x0a\x56\xe1\x9c\xb9\xf8\x84\x23\x74\x88\xbc\x26\x36\xc9\x5b\xe7\xee\x12\x79\xa5\x57\x57\xbb\x27\x95\x9c\x07\x67\x7f\x3c\xf0\xe2\xf7\x18\xcc\x8b\xf1\x0a\x8f\xf7\x97\x77\x37\xe7\x93\x4b\x3c\x4c\xeb\x6b\x7c\xb9\xbf\x41\x7d\x8b\xfa\x7a\x3a\x43\x3d\x9d\x7c\xba\xac\x9f\x9c\xab\x97\xf4\x97\x5b\x75\xe9\x53\x42\xe4\x90\x4a\x43\xbf\x09\xb4\x91\x52\xa3\x95\x03\x80\x13\xcc\x28\x47\x9f\xc0\xa5\x9b\x53\xde\xaf\x4d\x8a\x9a\x34\xd1\xf3\xbe\xbe\x2a\x29\xa1\x89\xba\x02\x71\xc8\xdb\x7e\x27\x36\x88\x16\xdd\x77\x7c\xf6\x69\x33\x10\xed\xb3\x18\x85\xe3\xfd\x01\xe7\x1b\xd4\xdb\x62\x2a\x25\x07\x6a\x30\x19\x1c\x4e\x88\x8d\x32\xa6\xf5\xbf\xe2\x3d\x57\xf4\x3e\x1b\xa4\x85\x94\x0c\xe1\x85\x0c\x6f\x0a\xd2\xf5\x29\x7a\x0e\x04\x6a\x5b\xc9\xa6\xf0\xdc\xa0\xa7\xbc\x6b\x53\xca\xeb\x18\x86\xd4\x17\x99\xa8\x23\xb6\x53\x6c\x86\xfc\xbf\x97\x98\x09\xfe\x90\xd1\x1b\x41\x69\x8f\x2d\x28\x22\xa3\x28\x0d\xbf\x81\xe6\xfe\xc2\x57\x1b\x3e\x04\x29\x6c\x67\x87\xf8\x5f\xe6\xac\x39\x4c\x63\x47\xaa\x7e\x41\xd8\x44\xfb\x7
5\x28\x0e\x82\x67\xff\x4d\x9c\x3f\x03\x00\x00\xff\xff\x89\xd2\xee\x65\x39\x04\x00\x00")
func complySoc2ProceduresWorkstationMdBytes() ([]byte, error) {
return bindataRead(
@@ -1117,7 +1117,7 @@ func complySoc2ProceduresWorkstationMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/procedures/workstation.md", size: 92, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/procedures/workstation.md", size: 1081, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -1137,7 +1137,7 @@ func complySoc2StandardsReadmeMd() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/standards/README.md", size: 282, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/standards/README.md", size: 282, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -1157,7 +1157,7 @@ func complySoc2StandardsTsc2017Yml() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/standards/TSC-2017.yml", size: 16305, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/standards/TSC-2017.yml", size: 16305, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -1177,7 +1177,7 @@ func complySoc2TemplatesDefaultLatex() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/templates/default.latex", size: 7649, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/templates/default.latex", size: 7649, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -1197,7 +1197,7 @@ func complySoc2TemplatesIndexAce() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "comply-soc2/templates/index.ace", size: 7596, mode: os.FileMode(420), modTime: time.Unix(1526668702, 0)}
info := bindataFileInfo{name: "comply-soc2/templates/index.ace", size: 7596, mode: os.FileMode(420), modTime: time.Unix(1545086630, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}

View File

@@ -5,7 +5,9 @@ import (
"sort"
"time"
"github.com/pkg/errors"
"github.com/robfig/cron"
"github.com/strongdm/comply/internal/config"
"github.com/strongdm/comply/internal/model"
)
@@ -68,7 +70,10 @@ func TriggerScheduled() error {
// in the future, nothing to do
continue
}
trigger(procedure)
err = trigger(procedure)
if err != nil {
return err
}
} else {
// don't go back further than 13 months
tooOld := time.Now().Add(-1 * time.Hour * 24 * (365 + 30))
@@ -88,7 +93,10 @@ func TriggerScheduled() error {
}
// is in the past? then trigger.
trigger(procedure)
err = trigger(procedure)
if err != nil {
return err
}
break SEARCH
}
}
@@ -97,13 +105,18 @@ func TriggerScheduled() error {
return nil
}
func trigger(procedure *model.Procedure) {
func trigger(procedure *model.Procedure) error {
fmt.Printf("triggering procedure %s (cron expression: %s)\n", procedure.Name, procedure.Cron)
// TODO: don't hardcode GH
tp := model.GetPlugin(model.GitHub)
tp.Create(&model.Ticket{
ts, err := config.Config().TicketSystem()
if err != nil {
return errors.Wrap(err, "error in ticket system configuration")
}
tp := model.GetPlugin(model.TicketSystem(ts))
err = tp.Create(&model.Ticket{
Name: procedure.Name,
Body: fmt.Sprintf("%s\n\n\n---\nProcedure-ID: %s", procedure.Body, procedure.ID),
}, []string{"comply", "comply-procedure"})
return err
}

BIN
logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.0 KiB

View File

@@ -17,6 +17,78 @@ majorRevisions:
# Control Environment Narrative
Here we narrate why our control environment satisfies the control keys listed in the YML block
The following provides a description of the control structure of {{.Name}}.
# Template Coming Soon
The intent of this description is to enumerate the logical, policy, and procedural controls that serve to monitor {{.Name}}'s application and data security. Changes uncovered by these procedures in the logical, policy, procedural, or customer environment are addressed by remediations specific to the noted change.
# Logical Controls
{{.Name}} employs several logical controls to protect confidential data and ensure normal operation of its core product.
- Mandatory data encryption at rest and in motion
- Multi-factor authentication for access to cloud infrastructure
- Activity and anomaly monitoring on production systems
- Vulnerability management program
# Policy Controls
{{.Name}} employs several policy controls to protect confidential data and ensure normal operation of its core product. These policies include, but are not limited to:
- Access Control Policy
- Encryption Policy
- Office Security Policy
- Password Policy
- Policy Training Policy
- Vendor Policy
- Workstation Policy
# Procedural Controls
{{.Name}} has numerous scheduled procedures to monitor and tune the effectiveness of ongoing security controls, and a series of event-driven procedures to respond to security-related events.
TODO: Finalize these lists
## Scheduled Security and Audit Procedures
- Review Access [quarterly]
- Review Security Logs [weekly]
- Review Cyber Risk Assessment (enumerate possible compromise scenarios) [quarterly]
- Review Data Classification [quarterly]
- Backup Testing [quarterly]
- Disaster Recovery Testing [semi-annual]
- Review Devices & Workstations [quarterly]
- Review & Clear Low-Priority Alerts [weekly]
- Apply OS Patches [monthly]
- Verify Data Disposal per Retention Policy [quarterly]
- Conduct Security Training [annual]
- Review Security Monitoring and Alerting Configuration [quarterly]
- Penetration Test [annual]
- Whitebox Security Review [annual]
- SOC2 Audit [annual]
## Event-Driven Security and Audit Procedures
- Onboard Employee
- Offboard Employee
- Investigate Security Alert
- Investigate Security Incident
# Remediations
{{.Name}} uses the outcomes of the aforementioned controls and procedures to identify shortcomings in the existing control environment. Once identified, these shortcomes are remediated by improving existing controls and procedures, and creating new controls and procedures as needed.
# Communications
{{.Name}} communicates relevant information regarding the functioning of the above controls with internal and external parties on an as-needed basis and according to statutory requirements.
## Internal
{{.Name}} communicates control outcomes, anomalies, and remediations internally using the following channels:
- Slack
- Email
- Github ticketing
## External
{{.Name}} communicates relevant control-related information to external parties including shareholders, customers, contractors, regulators, and government entities as needed according to contractual and regulatory/statutory obligation.

View File

@@ -9,4 +9,39 @@ majorRevisions:
Here we describe the key products marketed by our organization
# Template Coming Soon
# Products
## Product 1
Overview of product 1
### Architecture
Brief architectural discussion of product 1
### Security Considerations
Specific security considerations for product 1. Refer to policies, procedures here.
# References
## Narratives
List relevant narratives, probably including
Organizational Narrative
Security Narrative
System Narrative
## Policies
List relevant policies, probably including
Application Security Policy
Datacenter Policy
Log Management Policy
Password Policy
Security Incident Response Policy
Risk Assessment Policy
## Procedures
List relevant procedures, probably including access review, patching, alert monitoring, log review, pen testing

View File

@@ -15,4 +15,99 @@ majorRevisions:
Here we narrate why our org satisfies the control keys listed in the YML block
# Template Coming Soon
# {{.Name}} Product Architecture
Describe product architecture here, emphasizing security implications
# {{.Name}} Infrastructure
## Product Infrastructure
Describe product infrastructure, emphasizing security measures
### Authorized Personnel
- **AWS root account** access is granted only to the CTO and CEO
- **AWS IAM** access is granted to a limited group of **Operators**
- **{{.Name}} SSH** access is granted to a limited group of **Operators**
- **{{.Name}} DB** access is granted to a limited group of **Data Operators**
## IT Infrastructure
{{.Name}} uses the following cloud services for its internal infrastructure:
- List cloud services
Access to these cloud services is limited according to the role of the {{.Name}} employee and is reviewed quarterly as well as via regular onboarding/offboarding tasks for new and departing employees.
# {{.Name}} Workstations
{{.Name}} workstations are hardened against logical and physical attack by the following measures:
- operating system must be within one generation of current
- full-disk encryption
- onboard antivirus/antimalware software
- OS and AV automatically updated
Workstation compliance with these measures is evaluated on a quarterly basis.
## Remote Access
Many {{.Name}} employees work remotely on a regular basis and connect to production and internal IT systems via the same methods as those employees connecting from the {{.Name}} physical office, i.e., direct encrypted access to cloud services. It is the employee's responsibility to ensure that only authorized personnel use {{.Name}} resources and access {{.Name}} systems.
# Access Review
Access to {{.Name}} infrastructure, both internal and product, is reviewed quarterly and inactive users are removed. Any anomalies are reported to the security team for further investigation. When employees start or depart, an onboarding/offboarding procedure is followed to provision or deprovision appropriate account access.
# Penetration Testing
{{.Name}} commissions an external penetration test on an annual basis. All findings are immediately reviewed and addressed to the satisfaction of the CTO/CEO.
# {{.Name}} Physical Security
{{.Name}} has one physical location, in San Francisco, CA. Key issuance is tracked by the Office Physical Security Policy Ledger. Office keys are additionally held by the lessor, property management, and custodial staff. These keys are not tracked by the Office Physical Security Policy Ledger. {{.Name}} managers regularly review physical access privileges.
{{.Name}} infrastructure is located within AWS. {{.Name}} does not have physical access to AWS infrastructure.
# Risk Assessment
{{.Name}} updates its Cyber Risk Assessment on an annual basis in order to keep pace with the evolving threat landscape. The following is an inventory of adversarial and non-adversarial threats assessed to be of importance to {{.Name}}.
## Adversarial Threats
The following represents the inventory of adversarial threats:
|Threat|Source|Vector|Target|Likelihood|Severity|
|----------------------------+--------------+------------+-----------------+----------+------|
| | | | | | |
## Non-Adversarial Threats
The following represents the inventory of non-adversarial threats:
|Threat|Vector|Target|Likelihood|Severity|
|----------------------------+--------------+-------------+----------+------|
| | | | | |
# References
## Narratives
Products and Services Narrative
System Architecture Narrative
## Policies
Encryption Policy
Log Management Policy
Office Security Policy
Remote Access Policy
Security Incident Response Policy
Workstation Policy
## Procedures
Apply OS Patches
Review & Clear Low-Priority Alerts
Review Access
Review Devices & Workstations

View File

@@ -9,5 +9,49 @@ majorRevisions:
- date: Jun 1 2018
comment: Initial document
---
# Purpose and Scope
a. The purpose of this policy is to define procedures to onboard and offboard users to technical infrastructure in a manner that minimizes the risk of information loss or exposure.
a. This policy applies to all technical infrastructure within the organization.
a. This policy applies to all full-time and part-time employees and contractors.
# Background
a. In order to minimize the risk of information loss or exposure (from both inside and outside the organization), the organization is reliant on the principle of least privilege. Account creation and permission levels are restricted to only the resources absolutely needed to perform each person's job duties. When a user's role within the organization changes, those accounts and permission levels are changed/revoked to fit the new role and disabled when the user leaves the organization altogether.
# Policy
a. *During onboarding:*
i. Hiring Manager informs HR upon hire of a new employee.
i. HR emails IT to inform them of a new hire and their role.
i. IT creates a checklist of accounts and permission levels needed for that role.
i. The owner of each resource reviews and approves account creation and the
associated permissions.
i. IT works with the owner of each resource to set up the user.
a. *During offboarding:*
i. Hiring Manager notifies HR when an employee has been terminated.
i. HR sends a weekly email report to IT summarizing list of users terminated and instructs IT to disable their access.
i. IT terminates access within five business days from receipt of notification.
a. *When an employee changes roles within the organization:*
i. Hiring Manager will inform HR of a change in role.
i. HR and IT will follow the same steps as outlined in the onboarding and offboarding procedures.
a. *Review of accounts and permissions:*
i. Each month, IT and HR will review accounts and permission levels for accuracy.
# Coming Soon

View File

@@ -9,4 +9,92 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. The purpose of this policy is to define requirements for proper controls to protect the availability of the organizations information systems.
a. This policy applies to all users of information systems within the organization. This typically includes employees and contractors, as well as any external parties that come into contact with systems and information controlled by the organization (hereinafter referred to as “users”). This policy must be made readily available to all users.
# Background
a. The intent of this policy is to minimize the amount of unexpected or unplanned downtime (also known as outages) of information systems under the organizations control. This policy prescribes specific measures for the organization that will increase system redundancy, introduce failover mechanisms, and implement monitoring such that outages are prevented as much as possible. Where they cannot be prevented, outages will be quickly detected and remediated.
a. Within this policy, availability is defined as a characteristic of information or information systems in which such information or systems can be accessed by authorized entities whenever needed.
# References
a. Risk Assessment Policy
# Policy
a. Information systems must be consistently available to conduct and support business operations.
a. Information systems must have a defined availability classification, with appropriate controls enabled and incorporated into development and production processes based on this classification.
a. System and network failures must be reported promptly to the organizations lead for Information Technology (IT) or designated IT operations manager.
a. Users must be notified of scheduled outages (e.g., system maintenance) that require periods of downtime. This notification must specify the date and time of the system maintenance, expected duration, and anticipated system or service resumption time.
a. Prior to production use, each new or significantly modified application must have a completed risk assessment that includes availability risks. Risk assessments must be completed in accordance with the Risk Assessment Policy (reference (a)).
a. Capacity management and load balancing techniques must be used, as deemed necessary, to help minimize the risk and impact of system failures.
a. Information systems must have an appropriate data backup plan that ensures:
i. All sensitive data can be restored within a reasonable time period.
i. Full backups of critical resources are performed on at least a weekly basis.
i. Incremental backups for critical resources are performed on at least a daily basis.
i. Backups and associated media are maintained for a minimum of thirty (30) days and retained for at least one (1) year, or in accordance with legal and regulatory requirements.
i. Backups are stored off-site with multiple points of redundancy and protected using encryption and key management.
i. Tests of backup data must be conducted once per quarter. Tests of configurations must be conducted twice per year.
a. Information systems must have an appropriate redundancy and failover plan that meets the following criteria:
i. Network infrastructure that supports critical resources must have system-level redundancy (including but not limited to a secondary power supply, backup disk-array, and secondary computing system). Critical core components (including but not limited to routers, switches, and other devices linked to Service Level Agreements (SLAs)) must have an actively maintained spare. SLAs must require parts replacement within twenty-four (24) hours.
i. Servers that support critical resources must have redundant power supplies and network interface cards. All servers must have an actively maintained spare. SLAs must require parts replacement within twenty-four (24) hours.
i. Servers classified as high availability must use disk mirroring.
a. Information systems must have an appropriate business continuity plan that meets the following criteria:
i. Recovery time and data loss limits are defined in Table 3.
i. Recovery time requirements and data loss limits must be adhered to with specific documentation in the plan.
i. Company and/or external critical resources, personnel, and necessary corrective actions must be specifically identified.
i. Specific responsibilities and tasks for responding to emergencies and resuming business operations must be included in the plan.
i. All applicable legal and regulatory requirements must be satisfied.
+-------------------+------------------+---------------+-------------------+------------------+
|**Availability** | **Availability** | **Scheduled** | **Recovery Time** | **Data Loss or** |
|**Classification** | **Requirements** | **Outage** | **Requirements** | **Impact Loss** |
+===================+==================+===============+===================+==================+
| High | High to | 30 minutes | 1 hour | Minimal |
| | Continuous | | | |
+-------------------+------------------+---------------+-------------------+------------------+
| | | | | |
+-------------------+------------------+---------------+-------------------+------------------+
| Medium | Standard | 2 hours | 4 hours | Some data loss |
| | Availability | | | is tolerated if |
| | | | | it results in |
| | | | | quicker |
| | | | | restoration |
+-------------------+------------------+---------------+-------------------+------------------+
| | | | | |
+-------------------+------------------+---------------+-------------------+------------------+
| Low | Limited | 4 hours | Next | Some data loss |
| | Availability | | business day | is tolerated if |
| | | | | it results in |
| | | | | quicker |
| | | | | restoration |
+-------------------+------------------+---------------+-------------------+------------------+
Table 3: Recovery Time and Data Loss Limits

View File

@@ -7,5 +7,279 @@ majorRevisions:
- date: Jun 1 2018
comment: Initial document
---
# Appendices
Appendix A: Handling of Classified Information
Appendix B: Form - Confidentiality Statement
# Purpose and Scope
a. This data classification policy defines the requirements to ensure that information within the organization is protected at an appropriate level.
a. This document applies to the entire scope of the organizations information security program. It includes all types of information, regardless of its form, such as paper or electronic documents, applications and databases, and knowledge or information that is not written.
a. This policy applies to all individuals and systems that have access to information kept by the organization.
# Background
a. This policy defines the high level objectives and implementation instructions for the organizations data classification scheme. This includes data classification levels, as well as procedures for the classification, labeling and handling of data within the organization. Confidentiality and non-disclosure agreements maintained by the organization must reference this policy.
# References
a. Risk Assessment Policy
a. Security Incident Management Policy
# Policy
a. If classified information is received from outside the organization, the person who receives the information must classify it in accordance with the rules prescribed in this policy. The person thereby will become the owner of the information.
a. If classified information is received from outside the organization and handled as part of business operations activities (e.g., customer data on provided cloud services), the information classification, as well as the owner of such information, must be made in accordance with the specifications of the respective customer service agreement and other legal requirements.
a. When classifying information, the level of confidentiality is determined by:
i. The value of the information, based on impacts identified during the risk assessment process. More information on risk assessments is defined in the Risk Assessment Policy (reference (a)).
i. Sensitivity and criticality of the information, based on the highest risk calculated for each information item during the risk assessment.
i. Legal, regulatory and contractual obligations.
+-------------------+------------------+---------------------------+---------------------------+
|**Confidentiality**| **Label** | **Classification** | **Access** |
| **Level** | | **Criteria** | **Restrictions** |
+===================+==================+===========================+============================+
| Public | For Public | Making the information | Information is available |
| | Release | public will not harm | to the public. |
| | | the organization in | |
| | | any way. | |
+-------------------+------------------+---------------------------+---------------------------+
| | | | |
+-------------------+------------------+---------------------------+---------------------------+
| Internal Use | Internal Use | Unauthorized access | Information is available |
| | | may cause minor damage | to all employees and |
| | | and/or inconvenience | authorized third parties. |
|                   |                  | to the organization.      |                           |
+-------------------+------------------+---------------------------+---------------------------+
| | | | |
+-------------------+------------------+---------------------------+---------------------------+
| Restricted | Restricted | Unauthorized access to | Information is available |
| | | information may cause | to a specific group of |
|                   |                  | considerable damage to    | employees and authorized  |
| | | the business and/or | third parties. |
| | | the organization's | |
| | | reputation. | |
+-------------------+------------------+---------------------------+---------------------------+
| | | | |
+-------------------+------------------+---------------------------+---------------------------+
| Confidential |Confidential | Unauthorized access to | Information is available |
| | | information may cause | only to specific indivi- |
| | | catastrophic damage to | duals in the |
| | | business and/or the | organization. |
| | | organization's reputation.| |
+-------------------+------------------+---------------------------+---------------------------+
Table 3: Information Confidentiality Levels
&nbsp;
d. Information must be classified based on confidentiality levels as defined in Table 3.
e. Information and information system owners should try to use the lowest confidentiality level that ensures an adequate level of protection, thereby avoiding unnecessary production costs.
f. Information classified as “Restricted” or “Confidential” must be accompanied by a list of authorized persons in which the information owner specifies the names or job functions of persons who have the right to access that information.
g. Information classified as “Internal Use” must be accompanied by a list of authorized persons only if individuals outside the organization will have access to the document.
h. Information and information system owners must review the confidentiality level of their information assets every five years and assess whether the confidentiality level should be changed. Wherever possible, confidentiality levels should be lowered.
a. For cloud-based software services provided to customers, system owners under the company's control must also review the confidentiality level of their information systems after service agreement changes or after a customer's formal notification. Where allowed by service agreements, confidentiality levels should be lowered.
a. Information must be labeled according to the following:
i. Paper documents: the confidentiality level is indicated on the top and bottom of each document page; it is also indicated on the front of the cover or envelope carrying such a document as well as on the filing folder in which the document is stored. If a document is not labeled, its default classification is Internal Use.
i. Electronic documents: the confidentiality level is indicated on the top and bottom of each document page. If a document is not labeled, its default classification is Internal Use.
i. Information systems: the confidentiality level in applications and databases must be indicated on the system access screen, as well as on the screen when displaying such information.
i. Electronic mail: the confidentiality level is indicated in the first line of the email body. If it is not labeled, its default classification is “Internal Use”.
i. Electronic storage media (disks, memory cards, etc.): the confidentiality level must be indicated on the top surface of the media. If it is not labeled, its default classification is “Internal Use”.
i. Information transmitted orally: the confidentiality level should be mentioned before discussing information during face-to-face communication, by telephone, or any other means of oral communication.
a. All persons accessing classified information must follow the guidelines listed in Appendix A, “Handling of Classified Information.”
a. All persons accessing classified information must complete and submit a Confidentiality Statement to their immediate supervisor or company point-of-contact. A sample Confidentiality Statement is in Appendix B.
a. Incidents related to the improper handling of classified information must be reported in accordance with the Security Incident Management Policy (reference (b)).
\pagebreak
# Appendix A: Handling of Classified Information
Information and information systems must be handled according to the following guidelines*:
a. Paper Documents
i. Internal Use
1. Only authorized persons may have access.
1. If sent outside the organization, the document must be sent as registered mail.
1. Documents may only be kept in rooms without public access.
1. Documents must be removed expeditiously from printers and fax machines.
i. Restricted
1. The document must be stored in a locked cabinet.
1. Documents may be transferred within and outside the organization only in a closed envelope.
1. If sent outside the organization, the document must be mailed with a return receipt service.
1. Documents must immediately be removed from printers and fax machines.
1. Only the document owner may copy the document.
1. Only the document owner may destroy the document.
i. Confidential
1. The document must be stored in a safe.
1. The document may be transferred within and outside the organization only by a trustworthy person in a closed and sealed envelope.
1. Faxing the document is not permitted.
1. The document may be printed only if the authorized person is standing next to the printer.
a. Electronic Documents
i. Internal Use
1. Only authorized persons may have access.
1. When documents are exchanged via unencrypted file sharing services such as FTP, they must be password protected.
1. Access to the information system where the document is stored must be protected by a strong password.
1. The screen on which the document is displayed must be automatically locked after 10 minutes of inactivity.
i. Restricted
1. Only persons with authorization for this document may access the part of the information system where this document is stored.
1. When documents are exchanged via file sharing services of any type, they must be encrypted.
1. Only the document owner may erase the document.
i. Confidential
1. The document must be stored in encrypted form.
1. The document may be stored only on servers which are controlled by the organization.
1. The document may only be shared via file sharing services that are encrypted such as HTTPS and SSH. Further, the document must be encrypted and protected with a strong password when transferred.
a. Information Systems
i. Internal Use
1. Only authorized persons may have access.
1. Access to the information system must be protected by a strong password.
1. The screen must be automatically locked after 10 minutes of inactivity.
1. The information system may be only located in rooms with controlled physical access.
i. Restricted
1. Users must log out of the information system if they have temporarily or permanently left the workplace.
1. Data must be erased only with an algorithm that ensures secure deletion.
i. Confidential
1. Access to the information system must be controlled through multi-factor authentication (MFA).
1. The information system may only be installed on servers controlled by the organization.
1. The information system may only be located in rooms with controlled physical access and identity control of people accessing the room.
a. Electronic Mail
i. Internal Use
1. Only authorized persons may have access.
1. The sender must carefully check the recipient.
1. All rules stated under “information systems” apply.
i. Restricted
1. Email must be encrypted if sent outside the organization.
i. Confidential
1. Email must be encrypted.
a. Electronic Storage Media
i. Internal Use
1. Only authorized persons may have access.
1. Media or files must be password protected.
1. If sent outside the organization, the medium must be sent as registered mail.
1. The medium may only be kept in rooms with controlled physical access.
i. Restricted
1. Media and files must be encrypted.
1. Media must be stored in a locked cabinet.
1. If sent outside the organization, the medium must be mailed with a return receipt service.
1. Only the medium owner may erase or destroy the medium.
i. Confidential
1. Media must be stored in a safe.
1. Media may be transferred within and outside the organization only by a trustworthy person and in a closed and sealed envelope.
a. Information Transmitted Orally
i. Internal Use
1. Only authorized persons may have access to information.
1. Unauthorized persons must not be present in the room when the information is communicated.
i. Restricted
1. The room must be sound-proof.
1. The conversation must not be recorded.
i. Confidential
1. Conversation conducted through electronic means must be encrypted.
1. No transcript of the conversation may be kept.
In this document, controls are implemented cumulatively, meaning that controls for any confidentiality level imply the implementation of controls defined for lower confidentiality levels - if stricter controls are prescribed for a higher confidentiality level, then only such controls are implemented.
# Coming Soon

View File

@@ -8,4 +8,188 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. The purpose of this policy is to define requirements for establishing and maintaining baseline protection standards for company software, network devices, servers, and desktops.
a. This policy applies to all users performing software development, system administration, and management of these activities within the organization. This typically includes employees and contractors, as well as any relevant external parties involved in these activities (hereinafter referred to as “users”). This policy must be made readily available to all users.
a. This policy also applies to enterprise-wide systems and applications developed by the organization or on behalf of the organization for production implementation.
# Background
a. The intent of this policy is to ensure a well-defined, secure and consistent process for managing the entire lifecycle of software and information systems, from initial requirements analysis until system decommission. The policy defines the procedure, roles, and responsibilities, for each stage of the software development lifecycle.
a. Within this policy, the software development lifecycle consists of requirements analysis, architecture and design, development, testing, deployment/implementation, operations/maintenance, and decommission. These processes may be followed in any form; in a waterfall model, it may be appropriate to follow the process linearly, while in an agile development model, the process can be repeated in an iterative fashion.
# References
a. Risk Assessment Policy
# Policy
a. The organization's Software Development Life Cycle (SDLC) includes the following phases:
i. Requirements Analysis
i. Architecture and Design
i. Development
i. Testing
i. Deployment/Implementation
i. Operations/Maintenance
i. Decommission
a. During all phases of the SDLC where a system is not in production, the system must not have live data sets that contain information identifying actual people or corporate entities, actual financial data such as account numbers, security codes, routing information, or any other financially identifying data. Information that would be considered sensitive must never be used outside of production environments.
a. The following activities must be completed and/or considered during the requirements analysis phase:
i. Analyze business requirements.
i. Perform a risk assessment. More information on risk assessments is discussed in the Risk Assessment Policy (reference (a)).
i. Discuss aspects of security (e.g., confidentiality, integrity, availability) and how they might apply to this requirement.
i. Review regulatory requirements and the organizations policies, standards, procedures and guidelines.
i. Review future business goals.
i. Review current business and information technology operations.
i. Incorporate program management items, including:
1. Analysis of current system users/customers.
1. Understand customer-partner interface requirements (e.g., business-level, network).
1. Discuss project timeframe.
i. Develop and prioritize security solution requirements.
i. Assess cost and budget constraints for security solutions, including development and operations.
i. Approve security requirements and budget.
i. Make “buy vs. build” decisions for security services based on the information above.
a. The following must be completed/considered during the architecture and design phase:
i. Educate development teams on how to create a secure system.
i. Develop and/or refine infrastructure security architecture.
i. List technical and non-technical security controls.
i. Perform architecture walkthrough.
i. Create a system-level security design.
i. Create high-level non-technical and integrated technical security designs.
i. Perform a cost/benefit analysis for design components.
i. Document the detailed technical security design.
i. Perform a design review, which must include, at a minimum, technical reviews of application and infrastructure, as well as a review of high-level processes.
i. Describe detailed security processes and procedures, including: segregation of duties and segregation of development, testing and production environments.
i. Design initial end-user training and awareness programs.
i. Design a general security test plan.
i. Update the organizations policies, standards, and procedures, if appropriate.
i. Assess and document how to mitigate residual application and infrastructure vulnerabilities.
i. Design and establish separate development and test environments.
a. The following must be completed and/or considered during the development phase:
i. Set up a secure development environment (e.g., servers, storage).
i. Train infrastructure teams on installation and configuration of applicable software, if required.
i. Develop code for application-level security components.
i. Install, configure and integrate the test infrastructure.
i. Set up security-related vulnerability tracking processes.
i. Develop a detailed security test plan for current and future versions (i.e., regression testing).
i. Conduct unit testing and integration testing.
a. The following must be completed and/or considered during the testing phase:
i. Perform a code and configuration review through both static and dynamic analysis of code to identify vulnerabilities.
i. Test configuration procedures.
i. Perform system tests.
i. Conduct performance and load tests with security controls enabled.
i. Perform usability testing of application security controls.
i. Conduct independent vulnerability assessments of the system, including the infrastructure and application.
a. The following must be completed and/or considered during the deployment phase:
i. Conduct pilot deployment of the infrastructure, application and other relevant components.
i. Conduct transition between pilot and full-scale deployment.
i. Perform integrity checking on system files to ensure authenticity.
i. Deploy training and awareness programs to train administrative personnel and users in the systems security functions.
i. Require participation of at least two developers in order to conduct full-scale deployment to the production environment.
a. The following must be completed and/or considered during the operations/maintenance phase:
i. Several security tasks and activities must be routinely performed to operate and administer the system, including but not limited to:
1. Administering users and access.
1. Tuning performance.
1. Performing backups according to requirements defined in the System Availability Policy
1. Performing system maintenance (i.e., testing and applying security updates and patches).
1. Conducting training and awareness.
1. Conducting periodic system vulnerability assessments.
1. Conducting annual risk assessments.
i. Operational systems must:
1. Be reviewed to ensure that the security controls, both automated and manual, are functioning correctly and effectively.
1. Have logs that are periodically reviewed to evaluate the security of the system and validate audit controls.
1. Implement ongoing monitoring of systems and users to ensure detection of security violations and unauthorized changes.
1. Validate the effectiveness of the implemented security controls through security training as required by the Procedure For Executing Incident Response.
1. Have a software application and/or hardware patching process that is performed regularly in order to eliminate software bugs and security problems being introduced into the organization's technology environment. Patches and updates must be applied within ninety (90) days of release to provide for adequate testing and propagation of software updates. Emergency, critical, break-fix, and zero-day vulnerability patch releases must be applied as quickly as possible.
a. The following must be completed and/or considered during the decommission phase:
i. Conduct unit testing and integration testing on the system after component removal.
i. Conduct operational transition for component removal/replacement.
i. Determine data retention requirements for application software and systems data.
i. Document the detailed technical security design.
i. Update the organizations policies, standards and procedures, if appropriate.
i. Assess and document how to mitigate residual application and infrastructure vulnerabilities.

View File

@@ -7,5 +7,76 @@ majorRevisions:
- date: Jun 1 2018
comment: Initial document
---
# Purpose and Scope
a. This policy defines organizational requirements for the use of cryptographic controls, as well as the requirements for cryptographic keys, in order to protect the confidentiality, integrity, authenticity and nonrepudiation of information.
a. This policy applies to all systems, equipment, facilities and information within the scope of the organizations information security program.
a. All employees, contractors, part-time and temporary workers, service providers, and those employed by others to perform work on behalf of the organization having to do with cryptographic systems, algorithms, or keying material are subject to this policy and must comply with it.
# Background
a. This policy defines the high level objectives and implementation instructions for the organizations use of cryptographic algorithms and keys. It is vital that the organization adopt a standard approach to cryptographic controls across all work centers in order to ensure end-to-end security, while also promoting interoperability. This document defines the specific algorithms approved for use, requirements for key management and protection, and requirements for using cryptography in cloud environments.
# Policy
a. The organization must protect individual systems or information by means of cryptographic controls as defined in Table 3:
\pagebreak
+---------------------+-------------------+----------------+--------------+
| **Name of System/** | **Cryptographic** | **Encryption** | **Key Size** |
| **Type of** | **Tool** | **Algorithm** | |
| **Information** | | | |
+=====================+===================+================+==============+
| Public Key | OpenSSL | AES-256 | 256-bit key |
| Infrastructure for | | | |
| Authentication | | | |
+---------------------+-------------------+----------------+--------------+
| | | | |
+---------------------+-------------------+----------------+--------------+
| Data Encryption | OpenSSL | AES-256 | 256-bit key |
| Keys | | | |
+---------------------+-------------------+----------------+--------------+
| | | | |
+---------------------+-------------------+----------------+--------------+
| Virtual Private | OpenSSL and | AES-256 | 256-bit key |
| Network (VPN) | OpenVPN | | |
| keys | | | |
+---------------------+-------------------+----------------+--------------+
| | | | |
+---------------------+-------------------+----------------+--------------+
| Website SSL | OpenSSL, CERT | AES-256 | 256-bit key |
| Certificate | | | |
+---------------------+-------------------+----------------+--------------+
Table 3: Cryptographic Controls
&nbsp;
b. Except where otherwise stated, keys must be managed by their owners.
c. Cryptographic keys must be protected against loss, change or destruction by applying appropriate access control mechanisms to prevent unauthorized use and backing up keys on a regular basis.
d. When required, customers of the organizations cloud-based software or platform offering must be able to obtain information regarding:
i. The cryptographic tools used to protect their information.
i. Any capabilities that are available to allow cloud service customers to apply their own cryptographic solutions.
i. The identity of the countries where the cryptographic tools are used to store or transfer cloud service customers data.
a. The use of organizationally-approved encryption must be governed in accordance with the laws of the country, region, or other regulating entity in which users perform their work. Encryption must not be used to violate any laws or regulations including import/export restrictions. The encryption used by the Company conforms to international standards and U.S. import/export requirements, and thus can be used across international boundaries for business purposes.
a. All key management must be performed using software that automatically manages access control, secure storage, backup and rotation of keys. Specifically:
i. The key management service must provide key access to specifically-designated users, with the ability to encrypt/decrypt information and generate data encryption keys.
i. The key management service must provide key administration access to specifically-designated users, with the ability to create, schedule delete, enable/disable rotation, and set usage policies for keys.
i. The key management service must store and backup keys for the entirety of their operational lifetime.
i. The key management service must rotate keys at least once every 12 months.
# Coming Soon

View File

@@ -8,4 +8,133 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. This removable media, cloud storage and Bring Your Own Device (BYOD) policy defines the objectives, requirements and implementing instructions for storing data on removable media, in cloud environments, and on personally-owned devices, regardless of data classification level.
a. This policy applies to all information and data within the organizations information security program, as well as all removable media, cloud systems and personally-owned devices either owned or controlled by the organization.
a. This policy applies to all users of information systems within the organization. This typically includes employees and contractors, as well as any external parties that come into contact with systems and information controlled by the organization (hereinafter referred to as “users”). This policy must be made readily available to all users.
# Background
a. This policy defines the procedures for safely using removable media, cloud storage and personally-owned devices to limit data loss or exposure. Such forms of storage must be strictly controlled because of the sensitive data that can be stored on them. Because each of these storage types are inherently ephemeral or portable in nature, it is possible for the organization to lose the ability to oversee or control the information stored on them if strict security standards are not followed.
a. This document consists of three sections pertaining to removable media, cloud storage, and personally-owned devices. Each section contains requirements and implementing instructions for the registration, management, maintenance, and disposition of each type of storage.
a. Within this policy, the term sensitive information refers to information that is classified as RESTRICTED or CONFIDENTIAL in accordance with the Data Classification Policy (reference (a)).
# References
a. Data Classification Policy
a. Asset Inventory
a. Security Incident Response Policy
a. Encryption Policy
# Policy
a. *Removable Media*
i. All removable media in active use and containing data pertinent to the organization must be registered in the organizations Asset Inventory (reference (b)).
i. All removable media listed in reference (b) must be re-inventoried on a quarterly basis to ensure that it is still within the control of the organization.
1. To re-inventory an item, the owner of the removable media must check in the item with the organizations Information Security Manager (ISM).
1. The ISM must treat any removable media that cannot be located as lost, and a security incident report must be logged in accordance with the Security Incident Response Policy (reference (c)).
i. The owner of the removable media must conduct all appropriate maintenance on the item at intervals appropriate to the type of media, such as cleaning, formatting, labeling, etc.
i. The owner of the removable media, where practical, must ensure that an alternate or backup copy of the information located on the device exists.
i. Removable media must be stored in a safe place that has a reduced risk of fire or flooding damage.
i. If the storage item contains sensitive information, removable media must:
1. Be stored in a locked cabinet or drawer.
1. Store only encrypted data that is securely enciphered in accordance with the Encryption Policy (reference (d)).
i. All data on removable media devices must be erased, or the device must be destroyed, before it is reused or disposed of.
i. When removable media devices are disposed, the device owner must inform the ISM so that it can be removed from reference (b).
a. *Cloud Storage*
i. All cloud storage systems in active use and containing data pertinent to the organization must be registered in reference (b). Registration may be accomplished by manual or automated means.
i. All cloud storage systems listed in reference (b) must be re-inventoried on a quarterly basis to ensure that they are still within the control of the organization. To re-inventory an item, the owner of the cloud storage system must check in the item with the organizations Information Security Manager (ISM). Re-inventory may be accomplished by manual or automated means.
i. The owner of the cloud storage system must conduct all appropriate maintenance on the system at regular intervals to include system configuration, access control, performance monitoring, etc.
i. Data on cloud storage systems must be replicated to at least one other physical location. Depending on the cloud storage provider, this replication may be automatically configured.
i. The organization must only use cloud storage providers that can demonstrate, either through security accreditation, demonstration, tour, or other means that their facilities are secured, both physically and electronically, using best practices.
i. If the cloud storage system contains sensitive information, that information must be encrypted in accordance with reference (d).
i. Data must be erased from cloud storage systems using a technology and process that is approved by the ISM.
i. When use of a cloud storage system is discontinued, the system owner must inform the ISM so that it can be removed from reference (b).
a. *Personally-owned Devices*
i. Organizational data that is stored, transferred or processed on personally-owned devices remains under the organizations ownership, and the organization retains the right to control such data even though it is not the owner of the device.
i. The ISM is responsible for conducting overall management of personally-owned devices, to include:
1. Installation and maintenance of Mobile Device Management (MDM) software that can effectively manage, control and wipe data under the organizations control from personally-owned devices.
1. Maintain a list of job titles and/or persons authorized to use personally-owned devices for the organizations business, as well as the applications and databases that may be accessed from such devices.
1. Maintain a list of applications prohibited from use on personally-owned devices, and ensuring that device users are aware of these restrictions.
i. Personally-identifiable information (PII) may not be stored, processed or accessed at any time on a personally-owned device.
i. The following acceptable use requirements must be observed by users of personally-owned devices:
1. All organizational data must be backed up at regular intervals.
1. MDM and endpoint protection software must be installed on the device at all times.
1. Sensitive information stored on the device must be encrypted in accordance with reference (d).
1. The device must be secured using a password, pin, unlock pattern, fingerprint or equivalent security mechanism.
1. The device must only connect to secure and encrypted wireless networks.
1. When using the device outside of the organizations premises, it must not be left unattended, and if possible, physically secured.
1. When using the device in public areas, the owner must take measures to ensure that the data cannot be read or accessed by unauthorized persons.
1. Patches and updates must be installed regularly.
1. Classified information must be protected in accordance with reference (a).
1. The device owner must inform the ISM before the device is disposed of, sold, or provided to a third party for servicing.
1. It is prohibited to:
a. Allow device access for anyone except its owner.
a. Store illegal materials on the device.
a. Install unlicensed software.
a. Locally-store passwords.
a. Transfer organizational data to other devices which have not been approved by the organization.
i. The organization must reserve the right to view, edit, and/or delete any organizational information that is stored, processed or transferred on the device.
i. The organization must reserve the right to perform full deletion of all of its data on the device if it considers that necessary for the protection of company-related data, without the consent of the device owner.
i. The organization will not pay the employees (the owners of BYOD) any fee for using the device for work purposes.
i. The organization will pay for any new software that needs to be installed for company use.
i. All security breaches related to personally-owned devices must be reported immediately to the ISM.

View File

@@ -10,4 +10,50 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. The purpose of this policy is to define requirements for connecting to the organizations systems and networks from remote hosts, including personally-owned devices, in order to minimize data loss/exposure.
a. This policy applies to all users of information systems within the organization. This typically includes employees and contractors, as well as any external parties that come into contact with systems and information controlled by the organization (hereinafter referred to as “users”). This policy must be made readily accessible to all users.
# Background
a. The intent of this policy is to minimize the organizations exposure to damages which may result from the unauthorized remote use of resources, including but not limited to: the loss of sensitive, company confidential data and intellectual property; damage to the organizations public image; damage to the organizations internal systems; and fines and/or other financial liabilities incurred as a result of such losses.
a. Within this policy, the following definitions apply:
i. *Mobile computing equipment:* includes portable computers, mobile phones, smart phones, memory cards and other mobile equipment used for storage, processing and transfer of data.
i. *Remote host:* is defined as an information system, node or network that is not under direct control of the organization.
i. *Telework:* the act of using mobile computing equipment and remote hosts to perform work outside the organizations physical premises. Teleworking does not include the use of mobile phones.
# Policy
a. *Security Requirements for Remote Hosts and Mobile Computing Equipment*
i. Caution must be exercised when mobile computing equipment is placed or used in uncontrolled spaces such as vehicles, public spaces, hotel rooms, meeting places, conference centers, and other unprotected areas outside the organizations premises.
i. When using remote hosts and mobile computing equipment, users must take care that information on the device (e.g. displayed on the screen) cannot be read by unauthorized persons if the device is being used to connect to the organizations systems or work with the organizations data.
i. Remote hosts must be updated and patched for the latest security updates on at least a monthly basis.
i. Remote hosts must have endpoint protection software (e.g. malware scanner) installed and updated at all times.
i. Persons using mobile computing equipment off-premises are responsible for regular backups of organizational data that resides on the device.
i. Access to the organizations systems must be done through an encrypted and authenticated VPN connection with multi-factor authentication enabled. All users requiring remote access must be provisioned with VPN credentials from the organizations information technology team. VPN keys must be rotated at least twice per year. Revocation of VPN keys must be included in the Offboarding Policy.
i. Information stored on mobile computing equipment must be encrypted using hard drive full disk encryption.
a. *Security Requirements for Telework*
i. Employees must be specifically authorized for telework in writing from their hiring manager.
i. Only the device's assigned owner is permitted to use remote nodes and mobile computing equipment. Unauthorized users (such as others living or working at the location where telework is performed) are not permitted to use such devices.
i. Devices must be authorized using certificates
i. Users performing telework are responsible for the appropriate configuration of the local network used for connecting to the Internet at their telework location.
i. Users performing telework must protect the organizations intellectual property rights, either for software or other materials that are present on remote nodes and mobile computing equipment.

View File

@@ -8,4 +8,130 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. The purpose of this policy is to define the methodology for the assessment and treatment of information security risks within the organization, and to define the acceptable level of risk as set by the organizations leadership.
a. Risk assessment and risk treatment are applied to the entire scope of the organizations information security program, and to all assets which are used within the organization or which could have an impact on information security within it.
a. This policy applies to all employees of the organization who take part in risk assessment and risk treatment.
# Background
a. A key element of the organizations information security program is a holistic and systematic approach to risk management. This policy defines the requirements and processes for the organization to identify information security risks. The process consists of four parts: identification of the organizations assets, as well as the threats and vulnerabilities that apply; assessment of the likelihood and consequence (risk) of the threats and vulnerabilities being realized, identification of treatment for each unacceptable risk, and evaluation of the residual risk after treatment.
# References
a. Risk Assessment Report Template
# Policy
a. *Risk Assessment*
i. The risk assessment process includes the identification of threats and vulnerabilities having to do with company assets.
i. The first step in the risk assessment is to identify all assets within the scope of the information security program; in other words, all assets which may affect the confidentiality, integrity, and/or availability of information in the organization. Assets may include documents in paper or electronic form, applications, databases, information technology equipment, infrastructure, and external/outsourced services and processes. For each asset, an owner must be identified.
i. The next step is to identify all threats and vulnerabilities associated with each asset. Threats and vulnerabilities must be listed in a risk assessment table. Each asset may be associated with multiple threats, and each threat may be associated with multiple vulnerabilities. A sample risk assessment table is provided as part of the Risk Assessment Report Template (reference (a)).
i. For each risk, an owner must be identified. The risk owner and the asset owner may be the same individual.
i. Once risk owners are identified, they must assess:
1. Consequences for each combination of threats and vulnerabilities for an individual asset if such a risk materializes.
1. Likelihood of occurrence of such a risk (i.e. the probability that a threat will exploit the vulnerability of the respective asset).
1. Criteria for determining consequence and likelihood are defined in Tables 3 and 4.
i. The risk level is calculated by adding the consequence score and the likelihood score.
+-----------------+-----------------+--------------------------------------------------------------+
| **Consequence** | **Consequence** | **Description** |
| **Level** | **Score** | |
+=================+=================+==============================================================+
| Low | 0 | Loss of confidentiality, integrity, or availability will not |
| | | affect the organization's cash flow, legal, or contractual |
| | | obligations, or reputation. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
| Moderate | 1 | Loss of confidentiality, integrity, or availability may incur|
| | | financial cost and has low or moderate impact on the |
| | | organization's legal or contractual obligations and/or |
| | | reputation. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
| High | 2 | Loss of confidentiality, integrity, or availability will have|
| | | immediate and or/considerable impact on the organization's |
| | | cash flow, operations, legal and contractual obligations,and/|
| | | or reputation. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
Table 3: Description of Consequence Levels and Criteria
+-----------------+-----------------+--------------------------------------------------------------+
| **Likelihood** | **Likelihood** | **Description** |
| **Level** | **Score** | |
+=================+=================+==============================================================+
| Low | 0 | Either existing security controls are strong and have so far |
| | | provided an adequate level of protection, or the probability |
| | | of the risk being realized is extremely low. No new incidents|
| | | are expected in the future. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
| Moderate        | 1               | Either existing security controls have mostly provided an    |
| | | adequate level of protection or the probability of the risk |
| | | being realized is moderate. Some minor incidents may have |
|                 |                 | occurred. New incidents are possible, but not highly likely. |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
| High | 2 | Either existing security controls are not in place or |
| | | ineffective; there is a high probability of the risk being |
|                 |                 | realized. Incidents have a high likelihood of occurring in   |
|                 |                 | the future.                                                  |
+-----------------+-----------------+--------------------------------------------------------------+
| | | |
+-----------------+-----------------+--------------------------------------------------------------+
Table 4: Description of Likelihood Levels and Criteria
&nbsp;
b. *Risk Acceptance Criteria*
i. Risk values 0 through 2 are considered to be acceptable risks.
i. Risk values 3 and 4 are considered to be unacceptable risks. Unacceptable risks must be treated.
c. *Risk Treatment*
i. Risk treatment is implemented through the Risk Treatment Table. All risks from the Risk Assessment Table must be copied to the Risk Treatment Table for disposition, along with treatment options and residual risk. A sample Risk Treatment Table is provided in reference (a).
i. As part of this risk treatment process, the CEO and/or other company managers shall determine objectives for mitigating or treating risks. All unacceptable risks must be treated. For continuous improvement purposes, company managers may also opt to treat other risks for company assets, even if their risk score is deemed to be acceptable.
i. Treatment options for risks include the following options:
1. Selection or development of security control(s).
1. Transferring the risks to a third party; for example, by purchasing an insurance policy or signing a contract with suppliers or partners.
1. Avoiding the risk by discontinuing the business activity that causes such risk.
1. Accepting the risk; this option is permitted only if the selection of other risk treatment options would cost more than the potential impact of the risk being realized.
i. After selecting a treatment option, the risk owner should estimate the new consequence and likelihood values after the planned controls are implemented.
a. *Regular Reviews of Risk Assessment and Risk Treatment*
i. The Risk Assessment Table and Risk Treatment Table must be updated when new risks are identified. At a minimum, this update and review shall be conducted once per year. It is highly recommended that the Risk Assessment and Risk Treatment Table be updated when significant changes occur to the organization, technology, business objectives, or business environment.
a. *Reporting*
i. The results of risk assessment and risk treatment, and all subsequent reviews, shall be documented in a Risk Assessment Report.

View File

@@ -8,4 +8,40 @@ majorRevisions:
comment: Initial document
---
# Coming Soon
# Purpose and Scope
a. This policy defines the rules for relationships with the organizations Information Technology (IT) vendors and partners.
a. This policy applies to all IT vendors and partners who have the ability to impact the confidentiality, integrity, and availability of the organizations technology and sensitive information, or who are within the scope of the organizations information security program.
a. This policy applies to all employees and contractors that are responsible for the management and oversight of IT vendors and partners of the organization.
# Background
a. The overall security of the organization is highly dependent on the security of its contractual relationships with its IT suppliers and partners. This policy defines requirements for effective management and oversight of such suppliers and partners from an information security perspective. The policy prescribes minimum standards a vendor must meet from an information security standpoint, including security clauses, risk assessments, service level agreements, and incident management.
# References
a. Information Security Policy
a. Security Incident Response Policy
# Policy
a. IT vendors are prohibited from accessing the organizations information security assets until a contract containing security controls is agreed to and signed by the appropriate parties.
a. All IT vendors must comply with the security policies defined and derived from the Information Security Policy (reference (a)).
a. All security incidents by IT vendors or partners must be documented in accordance with the organizations Security Incident Response Policy (reference (b)) and immediately forwarded to the Information Security Manager (ISM).
a. The organization must adhere to the terms of all Service Level Agreements (SLAs) entered into with IT vendors. As terms are updated, and as new ones are entered into, the organization must implement any changes or controls needed to ensure it remains in compliance.
a. Before entering into a contract and gaining access to the parent organizations information systems, IT vendors must undergo a risk assessment.
i. Security risks related to IT vendors and partners must be identified during the risk assessment process.
i. The risk assessment must identify risks related to information and communication technology, as well as risks related to IT vendor supply chains, to include sub-suppliers.
a. IT vendors and partners must ensure that organizational records are protected, safeguarded, and disposed of securely. The organization strictly adheres to all applicable legal, regulatory and contractual requirements regarding the collection, processing, and transmission of sensitive data such as Personally-Identifiable Information (PII).
a. The organization may choose to audit IT vendors and partners to ensure compliance with applicable security policies, as well as legal, regulatory and contractual obligations.

View File

@@ -2,4 +2,10 @@ id: "offboard"
name: "Offboard User"
---
# Coming Soon
Resolve this ticket by executing the following steps:
- [ ] Immediately suspend user in SSO
- [ ] Append HR termination request e-mail to this ticket
- [ ] Look up manually-provisioned applications for this role or user
- [ ] Validate access revocation in each
- [ ] Append confirmation or revocation to this ticket

View File

@@ -2,4 +2,11 @@ id: "onboard"
name: "Onboard New User"
---
# Coming Soon
Resolve this ticket by executing the following steps:
- [ ] Append HR add request e-mail to this ticket
- [ ] Proactively validate role assignment with manager (see HR request e-mail)
- [ ] Add user to default group for the specified role
- [ ] Provision any manually-provisioned applications by role
- [ ] Append manual provisioning confirmation to this ticket
- [ ] Proactively confirm with new user that they can access all provisioned systems

View File

@@ -1,6 +1,15 @@
id: "patch"
name: "Apply OS patches"
cron: "0 0 1 * * *"
cron: "0 0 0 15 * *"
---
# Coming Soon
# OS Patch Procedure
Resolve this ticket by executing the following steps:
- [ ] Pull the latest scripts from the Ops repository
- [ ] Execute `ENV=staging patch-all.sh`
- [ ] Inspect output
- [ ] Errors? Investigate and resolve
- [ ] Execute `ENV=production patch-all.sh`
- [ ] Attach log output to this ticket

View File

@@ -1,6 +1,40 @@
id: "workstation"
name: "Collect Workstation Details"
cron: "0 0 * * * *"
cron: "0 0 0 15 4 *"
---
# Coming Soon
Resolve this ticket by executing the following steps:
- [ ] Send the communications below
- [ ] For any email replies, attach content to this ticket
- [ ] Validate responses are received from each
```
To: Desktop support
Subject: Annual workstation inventory
Please attach the current workstation inventory to the following ticket: [REPLACE WITH URL TO THIS TICKET]
The workstation inventory shall include the following fields:
* Serial number
* Custodian
* Full disk encryption status
* Malware protection status
```
```
To: Outsourced Call Center IT
Subject: Annual workstation inventory
As part of our ongoing compliance efforts and per our services agreement, we require a current inventory of workstations in use in the service of our account.
Please respond to this message with the current inventory.
The workstation inventory shall include the following fields:
* Serial number
* Custodian
* Full disk encryption status
* Malware protection status
```

10
tools.go Normal file
View File

@@ -0,0 +1,10 @@
// +build tools
package tools
import (
_ "github.com/Clever/gitsem"
_ "github.com/aktau/github-release"
_ "github.com/containous/go-bindata/go-bindata" // v1.0.0
_ "github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs" // v0.0.0-20170227122030-30f82fa23fd8
)

24
vendor/github.com/Clever/gitsem/.drone.yml generated vendored Normal file
View File

@@ -0,0 +1,24 @@
# Drone CI configuration (legacy 0.x format) for the gitsem tool.
# Builds inside a Go 1.3 container and runs the Makefile test target.
image: bradrydzewski/go:1.3
script:
- make test
# Build notifications: e-mail plus HipChat room updates for every
# build state (started/success/failure). Tokens are injected by Drone.
notify:
email:
recipients:
- drone@clever.com
hipchat:
room: Clever-Dev-CI
token: {{hipchat_token}}
on_started: true
on_success: true
on_failure: true
# On master, build release artifacts (make release) and publish them as
# a GitHub release tagged from the VERSION file (e.g. v1.2.3).
publish:
github:
branch: master
script:
- make release
artifacts:
- release
tag: v$(cat VERSION)
token: {{github_token}}
user: Clever
repo: gitsem

25
vendor/github.com/Clever/gitsem/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
# cgo-generated intermediate files
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
# Test harness stubs and compiled test binaries
_testmain.go
*.exe
*.test
# The locally-built gitsem binary
gitsem

54
vendor/github.com/Clever/gitsem/Makefile generated vendored Normal file
View File

@@ -0,0 +1,54 @@
SHELL := /bin/bash
PKG = github.com/Clever/gitsem
PKGS = $(PKG)
VERSION := $(shell cat VERSION)
EXECUTABLE := gitsem
BUILDS := \
build/$(EXECUTABLE)-v$(VERSION)-darwin-amd64 \
build/$(EXECUTABLE)-v$(VERSION)-linux-amd64 \
build/$(EXECUTABLE)-v$(VERSION)-windows-amd64
COMPRESSED_BUILDS := $(BUILDS:%=%.tar.gz)
RELEASE_ARTIFACTS := $(COMPRESSED_BUILDS:build/%=release/%)
.PHONY: test golint
golint:
@go get github.com/golang/lint/golint
test: $(PKGS)
$(PKGS): golint
@go get -d -t $@
@gofmt -w=true $(GOPATH)/src/$@*/**.go
ifneq ($(NOLINT),1)
@echo "LINTING..."
@PATH=$(PATH):$(GOPATH)/bin golint $(GOPATH)/src/$@*/**.go
@echo ""
endif
ifeq ($(COVERAGE),1)
@go test -cover -coverprofile=$(GOPATH)/src/$@/c.out $@ -test.v
@go tool cover -html=$(GOPATH)/src/$@/c.out
else
@echo "TESTING..."
@go test $@ -test.v
endif
run:
@go run main.go
build/$(EXECUTABLE)-v$(VERSION)-darwin-amd64:
GOARCH=amd64 GOOS=darwin go build -o "$@/$(EXECUTABLE)"
build/$(EXECUTABLE)-v$(VERSION)-linux-amd64:
GOARCH=amd64 GOOS=linux go build -o "$@/$(EXECUTABLE)"
build/$(EXECUTABLE)-v$(VERSION)-windows-amd64:
GOARCH=amd64 GOOS=windows go build -o "$@/$(EXECUTABLE).exe"
build: $(BUILDS)
%.tar.gz: %
tar -C `dirname $<` -zcvf "$<.tar.gz" `basename $<`
$(RELEASE_ARTIFACTS): release/% : build/%
mkdir -p release
cp $< $@
release: $(RELEASE_ARTIFACTS)
clean:
rm -rf build release

34
vendor/github.com/Clever/gitsem/README.md generated vendored Normal file
View File

@@ -0,0 +1,34 @@
# gitsem
A command line utility for managing semantically versioned (semver) git tags.
Run this in a git repository to bump the version and write the new data back to the VERSION file.
It will also create a version commit and, optionally, a tag; it fails if the repository is not clean.
## Installation
```shell
$ go get github.com/Clever/gitsem
```
## Example
```shell
$ gitsem patch
$ gitsem -m "Upgrade to %s for reasons" patch
$ gitsem minor
```
## Usage
```shell
gitsem [options] version
```
`version` can be one of: `newversion | patch | minor | major`
The version argument should be a valid semver string, or a field of a semver string (one of "patch", "minor", or "major").
In the second case, the existing version will be incremented by 1 in the specified field.
### Options
- `m=%s` specifies a commit message to use when bumping the version. If %s appears, it will be replaced with the new version number.
- `tag=true` whether or not to create a tag at the version commit

1
vendor/github.com/Clever/gitsem/VERSION generated vendored Normal file
View File

@@ -0,0 +1 @@
1.0.4

39
vendor/github.com/Clever/gitsem/git.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
package main
import (
"bytes"
"os/exec"
"strings"
)
// isRepoClean reports whether the working tree has no uncommitted
// changes, i.e. `git status -s` prints nothing.
func isRepoClean() (bool, error) {
	out, err := exec.Command("git", "status", "-s").Output()
	if err != nil {
		return false, err
	}
	return len(out) == 0, nil
}
// repoRoot returns the repository's top-level directory as reported by
// `git rev-parse --show-toplevel`, with trailing whitespace removed.
func repoRoot() (string, error) {
	out, err := exec.Command("git", "rev-parse", "--show-toplevel").Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}
// addFile stages path with `git add`.
func addFile(path string) error {
	return exec.Command("git", "add", path).Run()
}
// commit records the currently staged changes using message.
func commit(message string) error {
	return exec.Command("git", "commit", "-m", message).Run()
}
// tag creates a lightweight git tag named version at HEAD.
func tag(version string) error {
	return exec.Command("git", "tag", version).Run()
}

126
vendor/github.com/Clever/gitsem/main.go generated vendored Normal file
View File

@@ -0,0 +1,126 @@
package main
import (
"flag"
"fmt"
"gopkg.in/blang/semver.v1"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
)
// commitMessage renders the commit-message template: a %s placeholder in
// message is substituted with version; otherwise message is returned as-is.
func commitMessage(message, version string) string {
	if !strings.Contains(message, "%s") {
		return message
	}
	return fmt.Sprintf(message, version)
}
// getCurrentVersion parses the semver string stored at path. A missing
// file is not an error: it yields the zero version (0.0.0).
func getCurrentVersion(path string) (*semver.Version, error) {
	if _, statErr := os.Stat(path); os.IsNotExist(statErr) {
		return &semver.Version{}, nil
	}
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return semver.New(strings.TrimSpace(string(raw)))
}
// versionFileName is the file at the repository root holding the current version.
const versionFileName = "VERSION"

// exitWithError prints message to stderr followed by the usage text, then
// exits with status 1.
func exitWithError(message string) {
	// Print the message verbatim ("%s") rather than using it as a Printf
	// format string, which would mangle any '%' the message contains
	// (flagged by go vet's printf check).
	fmt.Fprintf(os.Stderr, "%s\n\n", message)
	flag.Usage()
	os.Exit(1)
}
// bump returns a copy of old with the named part ("major", "minor" or
// "patch") incremented, zeroing the lower-order fields as semver requires.
// An unrecognized part returns an unmodified copy.
func bump(old *semver.Version, part string) *semver.Version {
	// semver.Version offers no Clone/Copy, so round-trip through the
	// string form; the input is known-valid, so the error is ignored.
	next, _ := semver.New(old.String())
	switch part {
	case "major":
		next.Major++
		next.Minor = 0
		next.Patch = 0
	case "minor":
		next.Minor++
		next.Patch = 0
	case "patch":
		next.Patch++
	}
	return next
}
// main implements the gitsem CLI: it validates the working tree is clean,
// bumps or sets the version in the root VERSION file, commits the change,
// and optionally tags it.
func main() {
	// Custom usage text documents the positional version argument in
	// addition to the flags.
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage of %s: [options] version\n\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "version can be one of: newversion | patch | minor | major\n\n")
		fmt.Fprintf(os.Stderr, "options:\n")
		flag.PrintDefaults()
	}
	message := flag.String("m", "%s", "commit message for version commit")
	help := flag.Bool("h", false, "print usage and exit")
	shouldTag := flag.Bool("tag", true, "whether or not to make a tag at the version commit")
	flag.Parse()
	if *help {
		flag.Usage()
		os.Exit(0)
	}
	if *message == "" {
		exitWithError("missing message")
	}
	// Refuse to run on a dirty tree so the version bump is an isolated commit.
	if clean, err := isRepoClean(); err != nil {
		log.Fatal(err)
	} else if !clean {
		log.Fatal("repo isn't clean")
	}
	root, err := repoRoot()
	if err != nil {
		log.Fatal(err)
	}
	// The VERSION file always lives at the repository root, regardless of
	// the directory gitsem was invoked from.
	versionFile := filepath.Join(root, versionFileName)
	version, err := getCurrentVersion(versionFile)
	if err != nil {
		log.Fatal(err)
	}
	if len(flag.Args()) != 1 {
		exitWithError("gitsem takes exactly one non-flag argument: version")
	}
	newVersion := flag.Args()[0]
	switch newVersion {
	case "patch", "minor", "major":
		// Relative bump of the existing version.
		version = bump(version, newVersion)
	default:
		// Otherwise the argument must itself be a valid semver string.
		if version, err = semver.New(newVersion); err != nil {
			log.Fatalf("failed to parse %s as semver: %s", newVersion, err.Error())
		}
	}
	if err := ioutil.WriteFile(versionFile, []byte(version.String()), 0666); err != nil {
		log.Fatal(err)
	}
	if err := addFile(versionFile); err != nil {
		log.Fatal(err)
	}
	// Commit messages and tags carry a "v" prefix, e.g. v1.2.3.
	versionString := "v" + version.String()
	*message = commitMessage(*message, versionString)
	if err := commit(*message); err != nil {
		log.Fatal(err)
	}
	if *shouldTag {
		if err := tag(versionString); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println(versionString)
}

View File

@@ -1,137 +1,137 @@
package winio
import (
"bytes"
"encoding/binary"
"errors"
)
// fileFullEaInformation mirrors the fixed-size Windows
// FILE_FULL_EA_INFORMATION header that precedes each EA entry.
type fileFullEaInformation struct {
	NextEntryOffset uint32
	Flags           uint8
	NameLength      uint8
	ValueLength     uint16
}
var (
	// fileFullEaInformationSize is the encoded size of the header above.
	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
	errEaNameTooLarge  = errors.New("extended attribute name too large")
	errEaValueTooLarge = errors.New("extended attribute value too large")
)
// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
	Name  string
	Value []byte
	Flags uint8
}
// parseEa decodes the first FILE_FULL_EA_INFORMATION entry in b, returning
// the attribute, the remainder of the buffer (nil when this was the last
// entry), and errInvalidEaBuffer when the buffer is malformed.
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
	var info fileFullEaInformation
	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
	if err != nil {
		err = errInvalidEaBuffer
		return
	}
	// Entry layout: fixed header, name bytes, a NUL terminator, then the value.
	nameOffset := fileFullEaInformationSize
	nameLen := int(info.NameLength)
	valueOffset := nameOffset + int(info.NameLength) + 1
	valueLen := int(info.ValueLength)
	nextOffset := int(info.NextEntryOffset)
	// Bounds-check before slicing; a short or inconsistent buffer is invalid.
	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
		err = errInvalidEaBuffer
		return
	}
	ea.Name = string(b[nameOffset : nameOffset+nameLen])
	ea.Value = b[valueOffset : valueOffset+valueLen]
	ea.Flags = info.Flags
	// A zero NextEntryOffset marks the final entry; nb stays nil then,
	// which terminates the caller's loop.
	if info.NextEntryOffset != 0 {
		nb = b[info.NextEntryOffset:]
	}
	return
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) ([]ExtendedAttribute, error) {
	var eas []ExtendedAttribute
	for rest := b; len(rest) != 0; {
		ea, next, err := parseEa(rest)
		if err != nil {
			return nil, err
		}
		eas = append(eas, ea)
		rest = next
	}
	return eas, nil
}
// writeEa appends one FILE_FULL_EA_INFORMATION entry for ea to buf,
// 4-byte aligned. last suppresses the NextEntryOffset link so readers know
// the list ends here.
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
	// The wire format stores the name length in a uint8 and the value
	// length in a uint16; reject anything that does not round-trip.
	if int(uint8(len(ea.Name))) != len(ea.Name) {
		return errEaNameTooLarge
	}
	if int(uint16(len(ea.Value))) != len(ea.Value) {
		return errEaValueTooLarge
	}
	entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
	// Entries are padded out to 4-byte boundaries.
	withPadding := (entrySize + 3) &^ 3
	nextOffset := uint32(0)
	if !last {
		nextOffset = withPadding
	}
	info := fileFullEaInformation{
		NextEntryOffset: nextOffset,
		Flags:           ea.Flags,
		NameLength:      uint8(len(ea.Name)),
		ValueLength:     uint16(len(ea.Value)),
	}
	err := binary.Write(buf, binary.LittleEndian, &info)
	if err != nil {
		return err
	}
	_, err = buf.Write([]byte(ea.Name))
	if err != nil {
		return err
	}
	// NUL terminator after the name.
	err = buf.WriteByte(0)
	if err != nil {
		return err
	}
	_, err = buf.Write(ea.Value)
	if err != nil {
		return err
	}
	// Zero-fill from the raw entry size up to the padded size (0-3 bytes).
	_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
	if err != nil {
		return err
	}
	return nil
}
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
	var buf bytes.Buffer
	for i := range eas {
		// The final entry is flagged so writeEa zeroes its next-entry link.
		if err := writeEa(&buf, &eas[i], i == len(eas)-1); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
package winio
import (
"bytes"
"encoding/binary"
"errors"
)
// fileFullEaInformation mirrors the fixed-size Windows
// FILE_FULL_EA_INFORMATION header that precedes each EA entry.
type fileFullEaInformation struct {
	NextEntryOffset uint32
	Flags           uint8
	NameLength      uint8
	ValueLength     uint16
}
var (
	// fileFullEaInformationSize is the encoded size of the header above.
	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
	errEaNameTooLarge  = errors.New("extended attribute name too large")
	errEaValueTooLarge = errors.New("extended attribute value too large")
)
// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
	Name  string
	Value []byte
	Flags uint8
}
// parseEa decodes the first FILE_FULL_EA_INFORMATION entry in b, returning
// the attribute, the remainder of the buffer (nil when this was the last
// entry), and errInvalidEaBuffer when the buffer is malformed.
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
	var info fileFullEaInformation
	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
	if err != nil {
		err = errInvalidEaBuffer
		return
	}
	// Entry layout: fixed header, name bytes, a NUL terminator, then the value.
	nameOffset := fileFullEaInformationSize
	nameLen := int(info.NameLength)
	valueOffset := nameOffset + int(info.NameLength) + 1
	valueLen := int(info.ValueLength)
	nextOffset := int(info.NextEntryOffset)
	// Bounds-check before slicing; a short or inconsistent buffer is invalid.
	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
		err = errInvalidEaBuffer
		return
	}
	ea.Name = string(b[nameOffset : nameOffset+nameLen])
	ea.Value = b[valueOffset : valueOffset+valueLen]
	ea.Flags = info.Flags
	// A zero NextEntryOffset marks the final entry; nb stays nil then,
	// which terminates the caller's loop.
	if info.NextEntryOffset != 0 {
		nb = b[info.NextEntryOffset:]
	}
	return
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) ([]ExtendedAttribute, error) {
	var eas []ExtendedAttribute
	for rest := b; len(rest) != 0; {
		ea, next, err := parseEa(rest)
		if err != nil {
			return nil, err
		}
		eas = append(eas, ea)
		rest = next
	}
	return eas, nil
}
// writeEa appends one FILE_FULL_EA_INFORMATION entry for ea to buf,
// 4-byte aligned. last suppresses the NextEntryOffset link so readers know
// the list ends here.
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
	// The wire format stores the name length in a uint8 and the value
	// length in a uint16; reject anything that does not round-trip.
	if int(uint8(len(ea.Name))) != len(ea.Name) {
		return errEaNameTooLarge
	}
	if int(uint16(len(ea.Value))) != len(ea.Value) {
		return errEaValueTooLarge
	}
	entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
	// Entries are padded out to 4-byte boundaries.
	withPadding := (entrySize + 3) &^ 3
	nextOffset := uint32(0)
	if !last {
		nextOffset = withPadding
	}
	info := fileFullEaInformation{
		NextEntryOffset: nextOffset,
		Flags:           ea.Flags,
		NameLength:      uint8(len(ea.Name)),
		ValueLength:     uint16(len(ea.Value)),
	}
	err := binary.Write(buf, binary.LittleEndian, &info)
	if err != nil {
		return err
	}
	_, err = buf.Write([]byte(ea.Name))
	if err != nil {
		return err
	}
	// NUL terminator after the name.
	err = buf.WriteByte(0)
	if err != nil {
		return err
	}
	_, err = buf.Write(ea.Value)
	if err != nil {
		return err
	}
	// Zero-fill from the raw entry size up to the padded size (0-3 bytes).
	_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
	if err != nil {
		return err
	}
	return nil
}
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
	var buf bytes.Buffer
	for i := range eas {
		// The final entry is flagged so writeEa zeroes its next-entry link.
		if err := writeEa(&buf, &eas[i], i == len(eas)-1); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}

View File

@@ -16,6 +16,7 @@ import (
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
type atomicBool int32
@@ -79,6 +80,7 @@ type win32File struct {
wg sync.WaitGroup
wgLock sync.RWMutex
closing atomicBool
socket bool
readDeadline deadlineHandler
writeDeadline deadlineHandler
}
@@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
}
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return makeWin32File(h)
// If we return the result of makeWin32File directly, it can result in an
// interface-wrapped nil, rather than a nil interface value.
f, err := makeWin32File(h)
if err != nil {
return nil, err
}
return f, nil
}
// closeHandle closes the resources associated with a Win32 handle
@@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
if f.closing.isSet() {
err = ErrFileClosed
}
} else if err != nil && f.socket {
// err is from Win32. Query the overlapped structure to get the winsock error.
var bytes, flags uint32
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
}
case <-timeout:
cancelIoEx(f.handle, &c.o)
@@ -265,6 +277,10 @@ func (f *win32File) Flush() error {
return syscall.FlushFileBuffers(f.handle)
}
func (f *win32File) Fd() uintptr {
return uintptr(f.handle)
}
func (d *deadlineHandler) set(deadline time.Time) error {
d.setLock.Lock()
defer d.setLock.Unlock()

View File

@@ -20,7 +20,8 @@ const (
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uintptr // includes padding
FileAttributes uint32
pad uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.

9
vendor/github.com/Microsoft/go-winio/go.mod generated vendored Normal file
View File

@@ -0,0 +1,9 @@
module github.com/Microsoft/go-winio
go 1.12
require (
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.1
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
)

16
vendor/github.com/Microsoft/go-winio/go.sum generated vendored Normal file
View File

@@ -0,0 +1,16 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

305
vendor/github.com/Microsoft/go-winio/hvsock.go generated vendored Normal file
View File

@@ -0,0 +1,305 @@
package winio
import (
"fmt"
"io"
"net"
"os"
"syscall"
"time"
"unsafe"
"github.com/Microsoft/go-winio/pkg/guid"
)
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
const (
afHvSock = 34 // AF_HYPERV
socketError = ^uintptr(0)
)
// An HvsockAddr is an address for a AF_HYPERV socket.
type HvsockAddr struct {
	VMID      guid.GUID
	ServiceID guid.GUID
}
// rawHvsockAddr is the wire-format socket address passed to winsock;
// layout presumably matches SOCKADDR_HV — TODO confirm against SDK headers.
type rawHvsockAddr struct {
	Family    uint16
	_         uint16
	VMID      guid.GUID
	ServiceID guid.GUID
}
// Network returns the address's network name, "hvsock".
func (addr *HvsockAddr) Network() string {
	return "hvsock"
}
// String renders the address as "<VM GUID>:<service GUID>".
func (addr *HvsockAddr) String() string {
	return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
}
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
	// The vsock template GUID with the port substituted into Data1.
	g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
	g.Data1 = port
	return g
}
// raw converts the address into its wire format.
func (addr *HvsockAddr) raw() rawHvsockAddr {
	return rawHvsockAddr{
		Family:    afHvSock,
		VMID:      addr.VMID,
		ServiceID: addr.ServiceID,
	}
}
// fromRaw fills the address from its wire format.
func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
	addr.VMID = raw.VMID
	addr.ServiceID = raw.ServiceID
}
// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
	sock *win32File
	addr HvsockAddr
}
// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
	sock          *win32File
	local, remote HvsockAddr
}
// newHvSocket creates an AF_HYPERV stream socket wrapped in a win32File
// registered for overlapped I/O.
func newHvSocket() (*win32File, error) {
	// Protocol 1 — presumably HV_PROTOCOL_RAW; confirm against SDK headers.
	fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
	if err != nil {
		return nil, os.NewSyscallError("socket", err)
	}
	f, err := makeWin32File(fd)
	if err != nil {
		syscall.Close(fd)
		return nil, err
	}
	// Mark as a socket so asyncIo re-queries failures via WSAGetOverlappedResult.
	f.socket = true
	return f, nil
}
// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
	// l exists only so early failures can be wrapped with the address.
	l := &HvsockListener{addr: *addr}
	sock, err := newHvSocket()
	if err != nil {
		return nil, l.opErr("listen", err)
	}
	sa := addr.raw()
	err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
	if err != nil {
		// Close the socket on failure (the original leaked it) and label
		// the failing call correctly: this is bind, not socket.
		sock.Close()
		return nil, l.opErr("listen", os.NewSyscallError("bind", err))
	}
	err = syscall.Listen(sock.handle, 16)
	if err != nil {
		sock.Close()
		return nil, l.opErr("listen", os.NewSyscallError("listen", err))
	}
	return &HvsockListener{sock: sock, addr: *addr}, nil
}
// opErr wraps err in a net.OpError attributed to this listener's address.
func (l *HvsockListener) opErr(op string, err error) error {
	return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
}
// Addr returns the listener's network address.
func (l *HvsockListener) Addr() net.Addr {
	return &l.addr
}
// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
	sock, err := newHvSocket()
	if err != nil {
		return nil, l.opErr("accept", err)
	}
	// Close the pre-created accept socket on any failure path; ownership
	// transfers to the returned conn only when sock is set to nil below.
	defer func() {
		if sock != nil {
			sock.Close()
		}
	}()
	c, err := l.sock.prepareIo()
	if err != nil {
		return nil, l.opErr("accept", err)
	}
	defer l.sock.wg.Done()
	// AcceptEx, per documentation, requires an extra 16 bytes per address.
	const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
	var addrbuf [addrlen * 2]byte
	var bytes uint32
	err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
	// asyncIo consumes the raw AcceptEx result and waits for completion.
	_, err = l.sock.asyncIo(c, nil, bytes, err)
	if err != nil {
		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
	}
	conn := &HvsockConn{
		sock: sock,
	}
	// AcceptEx wrote local and remote addresses into addrbuf back-to-back.
	conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
	conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
	sock = nil
	return conn, nil
}
// Close closes the listener, causing any pending Accept calls to fail.
func (l *HvsockListener) Close() error {
	return l.sock.Close()
}
/* Need to finish ConnectEx handling
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
sock, err := newHvSocket()
if err != nil {
return nil, err
}
defer func() {
if sock != nil {
sock.Close()
}
}()
c, err := sock.prepareIo()
if err != nil {
return nil, err
}
defer sock.wg.Done()
var bytes uint32
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
_, err = sock.asyncIo(ctx, c, nil, bytes, err)
if err != nil {
return nil, err
}
conn := &HvsockConn{
sock: sock,
remote: *addr,
}
sock = nil
return conn, nil
}
*/
// opErr wraps err in a net.OpError carrying both endpoint addresses.
func (conn *HvsockConn) opErr(op string, err error) error {
	return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}

// Read reads from the socket via an overlapped WSARecv, honoring the read
// deadline. A successful zero-byte receive is reported as io.EOF.
func (conn *HvsockConn) Read(b []byte) (int, error) {
	// Guard the empty buffer: the WSABuf below takes &b[0], which panics
	// when len(b) == 0. io.Reader permits (0, nil) for zero-length reads.
	if len(b) == 0 {
		return 0, nil
	}
	c, err := conn.sock.prepareIo()
	if err != nil {
		return 0, conn.opErr("read", err)
	}
	defer conn.sock.wg.Done()
	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
	var flags, bytes uint32
	err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
	n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
	if err != nil {
		// Wrap raw winsock errnos for readability.
		if _, ok := err.(syscall.Errno); ok {
			err = os.NewSyscallError("wsarecv", err)
		}
		return 0, conn.opErr("read", err)
	} else if n == 0 {
		// Zero bytes with no error means the peer closed the stream.
		err = io.EOF
	}
	return n, err
}
// Write sends b, looping over partial writes until every byte has been
// written or an error occurs; it returns the total number of bytes sent.
func (conn *HvsockConn) Write(b []byte) (int, error) {
	total := 0
	for len(b) > 0 {
		n, err := conn.write(b)
		if err != nil {
			return total + n, err
		}
		total += n
		b = b[n:]
	}
	return total, nil
}
// write performs a single overlapped WSASend of b and returns the number
// of bytes actually sent; callers loop over partial writes (see Write).
func (conn *HvsockConn) write(b []byte) (int, error) {
	c, err := conn.sock.prepareIo()
	if err != nil {
		return 0, conn.opErr("write", err)
	}
	defer conn.sock.wg.Done()
	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
	var bytes uint32
	err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
	n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
	if err != nil {
		// Wrap raw winsock errnos for readability.
		if _, ok := err.(syscall.Errno); ok {
			err = os.NewSyscallError("wsasend", err)
		}
		return 0, conn.opErr("write", err)
	}
	return n, err
}
// Close closes the socket connection, failing any pending read or write calls.
func (conn *HvsockConn) Close() error {
	return conn.sock.Close()
}

// shutdown half-closes the socket in the direction given by how
// (syscall.SHUT_RD or syscall.SHUT_WR).
func (conn *HvsockConn) shutdown(how int) error {
	// Pass how through. The original hard-coded SHUT_RD, which made
	// CloseWrite shut down the read side instead of the write side.
	err := syscall.Shutdown(conn.sock.handle, how)
	if err != nil {
		return os.NewSyscallError("shutdown", err)
	}
	return nil
}
// CloseRead shuts down the read end of the socket.
func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD)
if err != nil {
return conn.opErr("close", err)
}
return nil
}
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR)
if err != nil {
return conn.opErr("close", err)
}
return nil
}
// LocalAddr returns the local address of the connection.
func (conn *HvsockConn) LocalAddr() net.Addr {
return &conn.local
}
// RemoteAddr returns the remote address of the connection.
func (conn *HvsockConn) RemoteAddr() net.Addr {
return &conn.remote
}
// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
	// Propagate failures instead of discarding them as the original did.
	if err := conn.SetReadDeadline(t); err != nil {
		return err
	}
	return conn.SetWriteDeadline(t)
}
// SetReadDeadline implements the net.Conn SetReadDeadline method.
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
return conn.sock.SetReadDeadline(t)
}
// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
return conn.sock.SetWriteDeadline(t)
}

View File

@@ -3,10 +3,13 @@
package winio
import (
"context"
"errors"
"fmt"
"io"
"net"
"os"
"runtime"
"syscall"
"time"
"unsafe"
@@ -15,10 +18,51 @@ import (
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
type ioStatusBlock struct {
Status, Information uintptr
}
type objectAttributes struct {
Length uintptr
RootDirectory uintptr
ObjectName *unicodeString
Attributes uintptr
SecurityDescriptor *securityDescriptor
SecurityQoS uintptr
}
type unicodeString struct {
Length uint16
MaximumLength uint16
Buffer uintptr
}
type securityDescriptor struct {
Revision byte
Sbz1 byte
Control uint16
Owner uintptr
Group uintptr
Sacl uintptr
Dacl uintptr
}
type ntstatus int32
func (status ntstatus) Err() error {
if status >= 0 {
return nil
}
return rtlNtStatusToDosError(status)
}
const (
cERROR_PIPE_BUSY = syscall.Errno(231)
@@ -26,21 +70,20 @@ const (
cERROR_PIPE_CONNECTED = syscall.Errno(535)
cERROR_SEM_TIMEOUT = syscall.Errno(121)
cPIPE_ACCESS_DUPLEX = 0x3
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
cPIPE_UNLIMITED_INSTANCES = 255
cNMPWAIT_USE_DEFAULT_WAIT = 0
cNMPWAIT_NOWAIT = 1
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_TYPE_MESSAGE = 4
cPIPE_READMODE_MESSAGE = 2
cFILE_OPEN = 1
cFILE_CREATE = 2
cFILE_PIPE_MESSAGE_TYPE = 1
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
cSE_DACL_PRESENT = 4
)
var (
@@ -121,6 +164,11 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
} else if err == syscall.ERROR_MORE_DATA {
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
// and the message still has more bytes. Treat this as a success, since
// this package presents all named pipes as byte streams.
err = nil
}
return n, err
}
@@ -133,40 +181,53 @@ func (s pipeAddress) String() string {
return string(s)
}
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
for {
select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err == nil {
return h, nil
}
if err != cERROR_PIPE_BUSY {
return h, &os.PathError{Err: err, Op: "open", Path: *path}
}
// Wait 10 msec and try again. This is a rather simplistic
// view, as we always try each 10 milliseconds.
time.Sleep(time.Millisecond * 10)
}
}
}
// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then the timeout
// is the default timeout established by the pipe server.
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
var absTimeout time.Time
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
} else {
absTimeout = time.Now().Add(time.Second * 2)
}
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
conn, err := DialPipeContext(ctx, path)
if err == context.DeadlineExceeded {
return nil, ErrTimeout
}
return conn, err
}
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
var err error
var h syscall.Handle
for {
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != cERROR_PIPE_BUSY {
break
}
now := time.Now()
var ms uint32
if absTimeout.IsZero() {
ms = cNMPWAIT_USE_DEFAULT_WAIT
} else if now.After(absTimeout) {
ms = cNMPWAIT_NOWAIT
} else {
ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000)
}
err = waitNamedPipe(path, ms)
if err != nil {
if err == cERROR_SEM_TIMEOUT {
return nil, ErrTimeout
}
break
}
}
h, err = tryDialPipe(ctx, &path)
if err != nil {
return nil, &os.PathError{Op: "open", Path: path, Err: err}
return nil, err
}
var flags uint32
@@ -175,16 +236,6 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
return nil, err
}
var state uint32
err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0)
if err != nil {
return nil, err
}
if state&cPIPE_READMODE_MESSAGE != 0 {
return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")}
}
f, err := makeWin32File(h)
if err != nil {
syscall.Close(h)
@@ -207,43 +258,87 @@ type acceptResponse struct {
}
type win32PipeListener struct {
firstHandle syscall.Handle
path string
securityDescriptor []byte
config PipeConfig
acceptCh chan (chan acceptResponse)
closeCh chan int
doneCh chan int
firstHandle syscall.Handle
path string
config PipeConfig
acceptCh chan (chan acceptResponse)
closeCh chan int
doneCh chan int
}
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
if first {
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
}
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
if c.MessageMode {
mode |= cPIPE_TYPE_MESSAGE
}
sa := &syscall.SecurityAttributes{}
sa.Length = uint32(unsafe.Sizeof(*sa))
if securityDescriptor != nil {
len := uint32(len(securityDescriptor))
sa.SecurityDescriptor = localAlloc(0, len)
defer localFree(sa.SecurityDescriptor)
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
}
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
path16, err := syscall.UTF16FromString(path)
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
var oa objectAttributes
oa.Length = unsafe.Sizeof(oa)
var ntPath unicodeString
if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
defer localFree(ntPath.Buffer)
oa.ObjectName = &ntPath
// The security descriptor is only needed for the first pipe.
if first {
if sd != nil {
len := uint32(len(sd))
sdb := localAlloc(0, len)
defer localFree(sdb)
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
} else {
// Construct the default named pipe security descriptor.
var dacl uintptr
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
}
defer localFree(dacl)
sdb := &securityDescriptor{
Revision: 1,
Control: cSE_DACL_PRESENT,
Dacl: dacl,
}
oa.SecurityDescriptor = sdb
}
}
typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
if c.MessageMode {
typ |= cFILE_PIPE_MESSAGE_TYPE
}
disposition := uint32(cFILE_OPEN)
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
if first {
disposition = cFILE_CREATE
// By not asking for read or write access, the named pipe file system
// will put this pipe into an initially disconnected state, blocking
// client connections until the next call with first == false.
access = syscall.SYNCHRONIZE
}
timeout := int64(-50 * 10000) // 50ms
var (
h syscall.Handle
iosb ioStatusBlock
)
err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
runtime.KeepAlive(ntPath)
return h, nil
}
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
if err != nil {
return nil, err
}
@@ -354,22 +449,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
if err != nil {
return nil, err
}
// Immediately open and then close a client handle so that the named pipe is
// created but not currently accepting connections.
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != nil {
syscall.Close(h)
return nil, err
}
syscall.Close(h2)
l := &win32PipeListener{
firstHandle: h,
path: path,
securityDescriptor: sd,
config: *c,
acceptCh: make(chan (chan acceptResponse)),
closeCh: make(chan int),
doneCh: make(chan int),
firstHandle: h,
path: path,
config: *c,
acceptCh: make(chan (chan acceptResponse)),
closeCh: make(chan int),
doneCh: make(chan int),
}
go l.listenerRoutine()
return l, nil

235
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go generated vendored Normal file
View File

@@ -0,0 +1,235 @@
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
// and the Windows (mixed-endian) encoding. See here for details:
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
package guid
import (
"crypto/rand"
"crypto/sha1"
"encoding"
"encoding/binary"
"fmt"
"strconv"
"golang.org/x/sys/windows"
)
// Variant specifies which GUID variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8
// The variants specified by RFC 4122.
const (
// VariantUnknown specifies a GUID variant which does not conform to one of
// the variant encodings specified in RFC 4122.
VariantUnknown Variant = iota
VariantNCS
VariantRFC4122
VariantMicrosoft
VariantFuture
)
// Version specifies how the bits in the GUID were generated. For instance, a
// version 4 GUID is randomly generated, and a version 5 is generated from the
// hash of an input string.
type Version uint8
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
var b [16]byte
if _, err := rand.Read(b[:]); err != nil {
return GUID{}, err
}
g := FromArray(b)
g.setVersion(4) // Version 4 means randomly generated.
g.setVariant(VariantRFC4122)
return g, nil
}
// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
// and the sample code treats it as a series of bytes, so we do the same here.
//
// Some implementations, such as those found on Windows, treat the name as a
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
b := sha1.New()
namespaceBytes := namespace.ToArray()
b.Write(namespaceBytes[:])
b.Write(name)
a := [16]byte{}
copy(a[:], b.Sum(nil))
g := FromArray(a)
g.setVersion(5) // Version 5 means generated from a string.
g.setVariant(VariantRFC4122)
return g, nil
}
func fromArray(b [16]byte, order binary.ByteOrder) GUID {
var g GUID
g.Data1 = order.Uint32(b[0:4])
g.Data2 = order.Uint16(b[4:6])
g.Data3 = order.Uint16(b[6:8])
copy(g.Data4[:], b[8:16])
return g
}
func (g GUID) toArray(order binary.ByteOrder) [16]byte {
b := [16]byte{}
order.PutUint32(b[0:4], g.Data1)
order.PutUint16(b[4:6], g.Data2)
order.PutUint16(b[6:8], g.Data3)
copy(b[8:16], g.Data4[:])
return b
}
// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
func FromArray(b [16]byte) GUID {
return fromArray(b, binary.BigEndian)
}
// ToArray returns an array of 16 bytes representing the GUID in big-endian
// encoding.
func (g GUID) ToArray() [16]byte {
return g.toArray(binary.BigEndian)
}
// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
func FromWindowsArray(b [16]byte) GUID {
return fromArray(b, binary.LittleEndian)
}
// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
// encoding.
func (g GUID) ToWindowsArray() [16]byte {
return g.toArray(binary.LittleEndian)
}
func (g GUID) String() string {
return fmt.Sprintf(
"%08x-%04x-%04x-%04x-%012x",
g.Data1,
g.Data2,
g.Data3,
g.Data4[:2],
g.Data4[2:])
}
// FromString parses a string containing a GUID and returns the GUID. The only
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
// format.
func FromString(s string) (GUID, error) {
if len(s) != 36 {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
var g GUID
data1, err := strconv.ParseUint(s[0:8], 16, 32)
if err != nil {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
g.Data1 = uint32(data1)
data2, err := strconv.ParseUint(s[9:13], 16, 16)
if err != nil {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
g.Data2 = uint16(data2)
data3, err := strconv.ParseUint(s[14:18], 16, 16)
if err != nil {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
g.Data3 = uint16(data3)
for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
v, err := strconv.ParseUint(s[x:x+2], 16, 8)
if err != nil {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
g.Data4[i] = uint8(v)
}
return g, nil
}
func (g *GUID) setVariant(v Variant) {
d := g.Data4[0]
switch v {
case VariantNCS:
d = (d & 0x7f)
case VariantRFC4122:
d = (d & 0x3f) | 0x80
case VariantMicrosoft:
d = (d & 0x1f) | 0xc0
case VariantFuture:
d = (d & 0x0f) | 0xe0
case VariantUnknown:
fallthrough
default:
panic(fmt.Sprintf("invalid variant: %d", v))
}
g.Data4[0] = d
}
// Variant returns the GUID variant, as defined in RFC 4122.
func (g GUID) Variant() Variant {
b := g.Data4[0]
if b&0x80 == 0 {
return VariantNCS
} else if b&0xc0 == 0x80 {
return VariantRFC4122
} else if b&0xe0 == 0xc0 {
return VariantMicrosoft
} else if b&0xe0 == 0xe0 {
return VariantFuture
}
return VariantUnknown
}
func (g *GUID) setVersion(v Version) {
g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
}
// Version returns the GUID version, as defined in RFC 4122.
func (g GUID) Version() Version {
return Version((g.Data3 & 0xF000) >> 12)
}
// MarshalText returns the textual representation of the GUID.
func (g GUID) MarshalText() ([]byte, error) {
return []byte(g.String()), nil
}
// UnmarshalText takes the textual representation of a GUID, and unmarhals it
// into this GUID.
func (g *GUID) UnmarshalText(text []byte) error {
g2, err := FromString(string(text))
if err != nil {
return err
}
*g = g2
return nil
}

View File

@@ -1,3 +1,3 @@
package winio
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go

View File

@@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated by 'go generate'; DO NOT EDIT.
package winio
@@ -38,19 +38,25 @@ func errnoErr(e syscall.Errno) error {
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@@ -69,6 +75,7 @@ var (
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procBackupRead = modkernel32.NewProc("BackupRead")
procBackupWrite = modkernel32.NewProc("BackupWrite")
procbind = modws2_32.NewProc("bind")
)
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
@@ -120,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
return
}
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
var _p0 uint32
if wait {
_p0 = 1
} else {
_p0 = 0
}
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
@@ -176,27 +201,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA
return
}
func waitNamedPipe(name string, timeout uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _waitNamedPipe(_p0, timeout)
}
func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
@@ -227,6 +231,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
return
}
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
status = ntstatus(r0)
return
}
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
status = ntstatus(r0)
return
}
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
status = ntstatus(r0)
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
@@ -518,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
}
return
}
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

7
vendor/github.com/aktau/github-release/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,7 @@
/github-release
/go-app
*.exe
/bin
/var/cache

10
vendor/github.com/aktau/github-release/CONTRIBUTING.md generated vendored Normal file
View File

@@ -0,0 +1,10 @@
## Releasing new versions
1) Bump the version in github/version.go
2) Add a commit with message "github-release v1.2.3"
3) Run `git tag v1.2.3` where "1.2.3" stands in for the version you actually
want.
4) Run `make release`. Be sure to have `GITHUB_TOKEN` set in your environment.

89
vendor/github.com/aktau/github-release/Gopkg.lock generated vendored Normal file
View File

@@ -0,0 +1,89 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74"
name = "github.com/dustin/go-humanize"
packages = ["."]
pruneopts = "UT"
revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e"
version = "v1.0.0"
[[projects]]
digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d"
name = "github.com/go-stack/stack"
packages = ["."]
pruneopts = "UT"
revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
version = "v1.8.0"
[[projects]]
digest = "1:5c56664d98f37f0ee54bf572b0b189a3910c34c31052fc7d58b282c449b079fb"
name = "github.com/inconshreveable/log15"
packages = ["."]
pruneopts = "UT"
revision = "b30bc20e4fd12cec79a9aae62e91cfcf458bd253"
version = "v2.15"
[[projects]]
digest = "1:de0adde670b2119824a1252b61a0e989669f8b24af874e399bec0e0538b2f928"
name = "github.com/kevinburke/rest"
packages = ["."]
pruneopts = "UT"
revision = "0d2892b400f81cdfb979e2f718e6070fae17a507"
version = "2.2"
[[projects]]
digest = "1:0109cf4321a15313ec895f42e723e1f76121c6975ea006abfa20012272ec0937"
name = "github.com/mattn/go-colorable"
packages = ["."]
pruneopts = "UT"
revision = "68e95eba382c972aafde02ead2cd2426a8a92480"
version = "v0.1.6"
[[projects]]
digest = "1:0c58d31abe2a2ccb429c559b6292e7df89dcda675456fecc282fa90aa08273eb"
name = "github.com/mattn/go-isatty"
packages = ["."]
pruneopts = "UT"
revision = "7b513a986450394f7bbf1476909911b3aa3a55ce"
version = "v0.0.12"
[[projects]]
branch = "master"
digest = "1:f2dda646a25cf3b9f6f6931d86ef0b85b64979697e5833cdcbc0c23f2586d996"
name = "github.com/tomnomnom/linkheader"
packages = ["."]
pruneopts = "UT"
revision = "02ca5825eb8097f10d9cc53da78481a85ad84e04"
[[projects]]
digest = "1:d1d1683ae67edaebf7e51151f934943bf2f53b71260818d843ab7233aa145533"
name = "github.com/voxelbrain/goptions"
packages = ["."]
pruneopts = "UT"
revision = "26cb8b04692384f4dc269de3b5fcf3e2ef78573e"
version = "2.5.11"
[[projects]]
branch = "master"
digest = "1:8cab10971112233c82c83683a517378038eba1c20e71b29c592b73fa212437b3"
name = "golang.org/x/sys"
packages = [
"internal/unsafeheader",
"unix",
]
pruneopts = "UT"
revision = "bc7a7d42d5c30f4d0fe808715c002826ce2c624e"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/dustin/go-humanize",
"github.com/kevinburke/rest",
"github.com/tomnomnom/linkheader",
"github.com/voxelbrain/goptions",
]
solver-name = "gps-cdcl"
solver-version = 1

24
vendor/github.com/aktau/github-release/Gopkg.toml generated vendored Normal file
View File

@@ -0,0 +1,24 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
[[constraint]]
name = "github.com/dustin/go-humanize"
version = "1.0.0"
[[constraint]]
name = "github.com/kevinburke/rest"
version = "2.2.0"
[[constraint]]
name = "github.com/tomnomnom/linkheader"
branch = "master"
[[constraint]]
name = "github.com/voxelbrain/goptions"
version = "2.5.11"
[prune]
go-tests = true
unused-packages = true

21
vendor/github.com/aktau/github-release/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014-2017 Nicolas Hillegeer
Copyright (c) 2020 Meter, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

97
vendor/github.com/aktau/github-release/Makefile generated vendored Normal file
View File

@@ -0,0 +1,97 @@
SHELL=/bin/bash -o pipefail
LAST_TAG := $(shell git describe --abbrev=0 --tags)
USER := github-release
EXECUTABLE := github-release
# only include the amd64 binaries, otherwise the github release will become
# too big
UNIX_EXECUTABLES := \
darwin/amd64/$(EXECUTABLE) \
freebsd/amd64/$(EXECUTABLE) \
linux/amd64/$(EXECUTABLE)
WIN_EXECUTABLES := \
windows/amd64/$(EXECUTABLE).exe
COMPRESSED_EXECUTABLES=$(UNIX_EXECUTABLES:%=%.bz2) $(WIN_EXECUTABLES:%.exe=%.zip)
COMPRESSED_EXECUTABLE_TARGETS=$(COMPRESSED_EXECUTABLES:%=bin/%)
UPLOAD_CMD = bin/tmp/$(EXECUTABLE) upload -u $(USER) -r $(EXECUTABLE) -t $(LAST_TAG) -n $(subst /,-,$(FILE)) -f bin/$(FILE)
all: $(EXECUTABLE)
# the executable used to perform the upload, dogfooding and all...
bin/tmp/$(EXECUTABLE):
go build -v -o "$@"
# arm
bin/linux/arm/5/$(EXECUTABLE):
GOARM=5 GOARCH=arm GOOS=linux go build -o "$@"
bin/linux/arm/7/$(EXECUTABLE):
GOARM=7 GOARCH=arm GOOS=linux go build -o "$@"
# 386
bin/darwin/386/$(EXECUTABLE):
GOARCH=386 GOOS=darwin go build -o "$@"
bin/linux/386/$(EXECUTABLE):
GOARCH=386 GOOS=linux go build -o "$@"
bin/windows/386/$(EXECUTABLE):
GOARCH=386 GOOS=windows go build -o "$@"
# amd64
bin/freebsd/amd64/$(EXECUTABLE):
GOARCH=amd64 GOOS=freebsd go build -o "$@"
bin/darwin/amd64/$(EXECUTABLE):
GOARCH=amd64 GOOS=darwin go build -o "$@"
bin/linux/amd64/$(EXECUTABLE):
GOARCH=amd64 GOOS=linux go build -o "$@"
bin/windows/amd64/$(EXECUTABLE).exe:
GOARCH=amd64 GOOS=windows go build -o "$@"
# compressed artifacts, makes a huge difference (Go executable is ~9MB,
# after compressing ~2MB)
%.bz2: %
bzip2 --keep "$<"
%.zip: %.exe
zip "$@" "$<"
# git tag -a v$(RELEASE) -m 'release $(RELEASE)'
release: clean
ifndef GITHUB_TOKEN
@echo "Please set GITHUB_TOKEN in the environment to perform a release"
exit 1
endif
docker run --rm --volume $(PWD)/var/cache:/root/.cache/go-build \
--env GITHUB_TOKEN=$(GITHUB_TOKEN) \
--volume "$(PWD)":/go/src/github.com/github-release/github-release \
--workdir /go/src/github.com/github-release/github-release \
meterup/ubuntu-golang:latest \
./release \
"$(MAKE) bin/tmp/$(EXECUTABLE) $(COMPRESSED_EXECUTABLE_TARGETS) && \
git log --format=%B $(LAST_TAG) -1 | \
bin/tmp/$(EXECUTABLE) release -u $(USER) -r $(EXECUTABLE) \
-t $(LAST_TAG) -n $(LAST_TAG) -d - || true && \
$(foreach FILE,$(COMPRESSED_EXECUTABLES),$(UPLOAD_CMD);)"
# install and/or update all dependencies, run this from the project directory
# go get -u ./...
# go test -i ./
dep:
go list -f '{{join .Deps "\n"}}' | xargs go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | xargs go get -u
$(EXECUTABLE): dep
go build -o "$@"
install:
go install
clean:
rm go-app || true
rm $(EXECUTABLE) || true
rm -rf bin/
test:
go test ./...
.PHONY: clean release dep install

131
vendor/github.com/aktau/github-release/README.md generated vendored Normal file
View File

@@ -0,0 +1,131 @@
github-release
==============
A small commandline app written in Go that allows you to easily create
and delete releases of your projects on Github. In addition it allows
you to attach files to those releases.
It interacts with the [github releases API][releases-api]. Though it's entirely
possible to [do all these things with cURL][curl], it's not really that
user-friendly. For example, you need to first query the API to find the id of
the release you want, before you can upload an artifact. `github-release` takes
care of those little details.
[curl]: https://github.com/blog/1645-releases-api-preview
[releases-api]: https://developer.github.com/v3/repos/releases
It might still be a bit rough around the edges, pull requests are
welcome!
How to install
==============
If you don't have the Go toolset installed, and you don't want to, but
still want to use the app, you can download binaries for your platform
on the [releases
page](https://github.com/github-release/github-release/releases/latest). Yes, that's
dogfooding, check the makefile!
If you have Go installed, you can just do:
```sh
go get github.com/github-release/github-release
```
This will automatically download, compile and install the app.
After that you should have a `github-release` executable in your
`$GOPATH/bin`.
How to use
==========
**NOTE**: for these examples I've [created a github token][token] and set it as
the env variable `GITHUB_TOKEN`. `github-release` will automatically pick it up
from the environment so that you don't have to pass it as an argument.
[token]: https://help.github.com/articles/creating-an-access-token-for-command-line-use
```sh
# set your token
export GITHUB_TOKEN=...
# check the help
$ github-release --help
# make your tag and upload
$ git tag ... && git push --tags
# check the current tags and existing releases of the repo
$ github-release info -u aktau -r gofinance
git tags:
- v0.1.0 (commit: https://api.github.com/repos/aktau/gofinance/commits/f562727ce83ce8971a8569a1879219e41d56a756)
releases:
- v0.1.0, name: 'hoary ungar', description: 'something something dark side 2', id: 166740, tagged: 29/01/2014 at 14:27, published: 30/01/2014 at 16:20, draft: ✔, prerelease: ✗
- artifact: github.go, downloads: 0, state: uploaded, type: application/octet-stream, size: 1.9KB, id: 68616
# create a formal release
$ github-release release \
--user aktau \
--repo gofinance \
--tag v0.1.0 \
--name "the wolf of source street" \
--description "Not a movie, contrary to popular opinion. Still, my first release!" \
--pre-release
# you've made a mistake, but you can edit the release without
# having to delete it first (this also means you can edit without having
# to upload your files again)
$ github-release edit \
--user aktau \
--repo gofinance \
--tag v0.1.0 \
--name "Highlander II: The Quickening" \
--description "This is the actual description!"
# upload a file, for example the OSX/AMD64 binary of my gofinance app
$ github-release upload \
--user aktau \
--repo gofinance \
--tag v0.1.0 \
--name "gofinance-osx-amd64" \
--file bin/darwin/amd64/gofinance
# upload other files...
$ github-release upload ...
# you're not happy with it, so delete it
$ github-release delete \
--user aktau \
--repo gofinance \
--tag v0.1.0
```
GitHub Enterprise Support
=========================
You can point to a different GitHub API endpoint via the environment variable ```GITHUB_API```:
```
export GITHUB_API=http://github.company.com/api/v3
```
Used libraries
==============
| Package | Description | License |
| ------------------------------------------------------------------------ | ------------------- | ------- |
| [github.com/dustin/go-humanize](https://github.com/dustin/go-humanize) | humanize file sizes | MIT |
| [github.com/tomnomnom/linkheader](https://github.com/tomnomnom/linkheader) | GH API pagination | MIT |
| [github.com/voxelbrain/goptions](https://github.com/voxelbrain/goptions) | option parsing | BSD |
| [github.com/kevinburke/rest](https://github.com/kevinburke/rest) | HTTP client | MIT |
Todo
====
- Check if an artifact is already uploaded before starting a new upload
Copyright
=========
Copyright (c) 2014-2017, Nicolas Hillegeer. All rights reserved.
Copyright (c) 2020, Meter, Inc. All rights reserved.

58
vendor/github.com/aktau/github-release/assets.go generated vendored Normal file
View File

@@ -0,0 +1,58 @@
package main
import (
"fmt"
"net/http"
"time"
"github.com/github-release/github-release/github"
)
const (
// GET /repos/:owner/:repo/releases/assets/:id
// DELETE /repos/:owner/:repo/releases/assets/:id
ASSET_URI = "/repos/%s/%s/releases/assets/%d"
// API: https://developer.github.com/v3/repos/releases/#list-assets-for-a-release
// GET /repos/:owner/:repo/releases/:id/assets
ASSET_RELEASE_LIST_URI = "/repos/%s/%s/releases/%d/assets"
)
type Asset struct {
Url string `json:"url"`
Id int `json:"id"`
Name string `json:"name"`
ContentType string `json:"content_type"`
State string `json:"state"`
Size uint64 `json:"size"`
Downloads uint64 `json:"download_count"`
Created time.Time `json:"created_at"`
Published time.Time `json:"published_at"`
}
// findAsset returns the asset if an asset with name can be found in assets,
// otherwise returns nil.
func findAsset(assets []Asset, name string) *Asset {
for _, asset := range assets {
if asset.Name == name {
return &asset
}
}
return nil
}
// Delete sends a HTTP DELETE request for the given asset to Github. Returns
// nil if the asset was deleted OR there was nothing to delete.
func (a *Asset) Delete(user, repo, token string) error {
URL := nvls(EnvApiEndpoint, github.DefaultBaseURL) +
fmt.Sprintf(ASSET_URI, user, repo, a.Id)
resp, err := github.DoAuthRequest("DELETE", URL, "application/json", token, nil, nil)
if err != nil {
return fmt.Errorf("failed to delete asset %s (ID: %d), HTTP error: %b", a.Name, a.Id, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusNoContent {
return fmt.Errorf("failed to delete asset %s (ID: %d), status: %s", a.Name, a.Id, resp.Status)
}
return nil
}

489
vendor/github.com/aktau/github-release/cmd.go generated vendored Normal file
View File

@@ -0,0 +1,489 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"github.com/github-release/github-release/github"
)
func infocmd(opt Options) error {
user := nvls(opt.Info.User, EnvUser)
authUser := nvls(opt.Info.AuthUser, EnvAuthUser)
repo := nvls(opt.Info.Repo, EnvRepo)
token := nvls(opt.Info.Token, EnvToken)
tag := opt.Info.Tag
if user == "" || repo == "" {
return fmt.Errorf("user and repo need to be passed as arguments")
}
// Find regular git tags.
foundTags, err := Tags(user, repo, authUser, token)
if err != nil {
return fmt.Errorf("could not fetch tags, %v", err)
}
if len(foundTags) == 0 {
return fmt.Errorf("no tags available for %v/%v", user, repo)
}
tags := foundTags[:0]
for _, t := range foundTags {
// If the user only requested one tag, filter out the rest.
if tag == "" || t.Name == tag {
tags = append(tags, t)
}
}
renderer := renderInfoText
if opt.Info.JSON {
renderer = renderInfoJSON
}
// List releases + assets.
var releases []Release
if tag == "" {
// Get all releases.
vprintf("%v/%v: getting information for all releases\n", user, repo)
releases, err = Releases(user, repo, authUser, token)
if err != nil {
return err
}
} else {
// Get only one release.
vprintf("%v/%v/%v: getting information for the release\n", user, repo, tag)
release, err := ReleaseOfTag(user, repo, tag, authUser, token)
if err != nil {
return err
}
releases = []Release{*release}
}
return renderer(tags, releases)
}
func renderInfoText(tags []Tag, releases []Release) error {
fmt.Println("tags:")
for _, tag := range tags {
fmt.Println("-", &tag)
}
fmt.Println("releases:")
for _, release := range releases {
fmt.Println("-", &release)
}
return nil
}
func renderInfoJSON(tags []Tag, releases []Release) error {
out := struct {
Tags []Tag
Releases []Release
}{
Tags: tags,
Releases: releases,
}
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
return enc.Encode(&out)
}
// uploadcmd uploads a file as an asset of the release identified by the
// tag in opt.Upload. If an asset with the same name already exists and is
// either in state "new" (an incomplete upload) or the user asked for
// replacement (-R), the stale asset is deleted first. On a 502 response
// the partially-created asset is cleaned up so a retry can succeed.
func uploadcmd(opt Options) error {
	user := nvls(opt.Upload.User, EnvUser)
	authUser := nvls(opt.Upload.AuthUser, EnvAuthUser)
	repo := nvls(opt.Upload.Repo, EnvRepo)
	token := nvls(opt.Upload.Token, EnvToken)
	tag := opt.Upload.Tag
	name := opt.Upload.Name
	label := opt.Upload.Label
	file := opt.Upload.File
	vprintln("uploading...")
	if file == nil {
		return fmt.Errorf("provided file was not valid")
	}
	defer file.Close()
	if err := ValidateCredentials(user, repo, token, tag); err != nil {
		return err
	}
	// Find the release corresponding to the entered tag, if any.
	rel, err := ReleaseOfTag(user, repo, tag, authUser, token)
	if err != nil {
		return err
	}
	// If the user has attempted to upload this asset before, someone could
	// expect it to be present in the release struct (rel.Assets). However,
	// we have to separately ask for the specific assets of this release.
	// Reason: the assets in the Release struct do not contain incomplete
	// uploads (which regrettably happen often using the Github API). See
	// issue #26.
	var assets []Asset
	client := github.NewClient(authUser, token, nil)
	client.SetBaseURL(EnvApiEndpoint)
	err = client.Get(fmt.Sprintf(ASSET_RELEASE_LIST_URI, user, repo, rel.Id), &assets)
	if err != nil {
		return err
	}
	// Incomplete (failed) uploads will have their state set to new. These
	// assets are (AFAIK) useless in all cases. The only thing they will do
	// is prevent the upload of another asset of the same name. To work
	// around this GH API weirdness, let's just delete assets if:
	//
	// 1. Their state is new.
	// 2. The user explicitly asked to delete/replace the asset with -R.
	if asset := findAsset(assets, name); asset != nil &&
		(asset.State == "new" || opt.Upload.Replace) {
		// Fix: the message reports the asset's state, so pass asset.State
		// (the original passed asset.Name to the %s verb).
		vprintf("asset (id: %d) already existed in state %s: removing...\n", asset.Id, asset.State)
		if err := asset.Delete(user, repo, token); err != nil {
			return fmt.Errorf("could not replace asset: %v", err)
		}
	}
	v := url.Values{}
	v.Set("name", name)
	if label != "" {
		v.Set("label", label)
	}
	// Renamed from "url" to avoid shadowing the net/url package.
	uploadURL := rel.CleanUploadUrl() + "?" + v.Encode()
	resp, err := github.DoAuthRequest("POST", uploadURL, "application/octet-stream",
		token, nil, file)
	if err != nil {
		return fmt.Errorf("can't create upload request to %v, %v", uploadURL, err)
	}
	defer resp.Body.Close()
	vprintln("RESPONSE:", resp)
	var r io.Reader = resp.Body
	if VERBOSITY != 0 {
		// Mirror the response body to stderr while decoding it.
		r = io.TeeReader(r, os.Stderr)
	}
	var asset *Asset
	// For HTTP status 201 and 502, Github will return a JSON encoding of
	// the (partially) created asset.
	if resp.StatusCode == http.StatusBadGateway || resp.StatusCode == http.StatusCreated {
		vprintf("ASSET: ")
		asset = new(Asset)
		if err := json.NewDecoder(r).Decode(&asset); err != nil {
			return fmt.Errorf("upload failed (%s), could not unmarshal asset (err: %v)", resp.Status, err)
		}
	} else {
		vprintf("BODY: ")
		if msg, err := ToMessage(r); err == nil {
			return fmt.Errorf("could not upload, status code (%s), %v",
				resp.Status, msg)
		}
		return fmt.Errorf("could not upload, status code (%s)", resp.Status)
	}
	if resp.StatusCode == http.StatusBadGateway {
		// 502 means the upload failed, but GitHub still retains metadata
		// (an asset in state "new"). Attempt to delete that now since it
		// would clutter the list of release assets.
		// Fix: pass asset.State to the "state %s" verb (was asset.Name).
		vprintf("asset (id: %d) failed to upload, it's now in state %s: removing...\n", asset.Id, asset.State)
		if err := asset.Delete(user, repo, token); err != nil {
			return fmt.Errorf("upload failed (%s), could not delete partially uploaded asset (ID: %d, err: %v) in order to cleanly reset GH API state, please try again", resp.Status, asset.Id, err)
		}
		return fmt.Errorf("could not upload, status code (%s)", resp.Status)
	}
	return nil
}
// downloadcmd fetches a single release asset by name, either from the
// release for a specific tag or from the latest release. The asset is
// piped to stdout unless stdout is a terminal, in which case it is saved
// as a file of the same name in the current working directory.
func downloadcmd(opt Options) error {
	user := nvls(opt.Download.User, EnvUser)
	authUser := nvls(opt.Download.AuthUser, EnvAuthUser)
	repo := nvls(opt.Download.Repo, EnvRepo)
	token := nvls(opt.Download.Token, EnvToken)
	tag := opt.Download.Tag
	name := opt.Download.Name
	latest := opt.Download.Latest
	vprintln("downloading...")
	if err := ValidateTarget(user, repo, tag, latest); err != nil {
		return err
	}
	// Find the release corresponding to the entered tag, if any.
	var rel *Release
	var err error
	if latest {
		rel, err = LatestRelease(user, repo, authUser, token)
	} else {
		rel, err = ReleaseOfTag(user, repo, tag, authUser, token)
	}
	if err != nil {
		return err
	}
	asset := findAsset(rel.Assets, name)
	if asset == nil {
		// Fix: typo "coud" -> "could" in the user-facing error.
		return fmt.Errorf("could not find asset named %s", name)
	}
	var resp *http.Response
	if token == "" {
		// Use the regular github.com site if we don't have a token.
		resp, err = http.Get("https://github.com" + fmt.Sprintf("/%s/%s/releases/download/%s/%s", user, repo, tag, name))
	} else {
		url := nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf(ASSET_URI, user, repo, asset.Id)
		resp, err = github.DoAuthRequest("GET", url, "", token, map[string]string{
			"Accept": "application/octet-stream",
		}, nil)
	}
	if err != nil {
		return fmt.Errorf("could not fetch releases, %v", err)
	}
	defer resp.Body.Close()
	vprintln("GET", resp.Request.URL, "->", resp)
	// Check the HTTP status before parsing headers: a failed request may
	// omit Content-Length, and the original order reported a confusing
	// ParseInt error instead of the real non-200 status.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("github did not respond with 200 OK but with %v", resp.Status)
	}
	contentLength, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
	if err != nil {
		return err
	}
	out := os.Stdout // Pipe the asset to stdout by default.
	if isCharDevice(out) {
		// If stdout is a char device, assume it's a TTY (terminal). In this
		// case, don't pipe the asset to stdout, but create it as a file in
		// the current working folder.
		if out, err = os.Create(name); err != nil {
			return fmt.Errorf("could not create file %s", name)
		}
		defer out.Close()
	}
	return mustCopyN(out, resp.Body, contentLength)
}
// mustCopyN attempts to copy exactly N bytes, if this fails, an error is
// returned.
func mustCopyN(w io.Writer, r io.Reader, n int64) error {
an, err := io.Copy(w, r)
if an != n {
return fmt.Errorf("data did not match content length %d != %d", an, n)
}
return err
}
// ValidateTarget checks that a release target is fully specified: user
// and repo must be non-empty, and a tag is required unless the latest
// release was requested.
func ValidateTarget(user, repo, tag string, latest bool) error {
	switch {
	case user == "":
		return fmt.Errorf("empty user")
	case repo == "":
		return fmt.Errorf("empty repo")
	case tag == "" && !latest:
		return fmt.Errorf("empty tag")
	default:
		return nil
	}
}
// ValidateCredentials performs the same checks as ValidateTarget (with a
// mandatory tag) and additionally requires a non-empty token.
func ValidateCredentials(user, repo, token, tag string) error {
	err := ValidateTarget(user, repo, tag, false)
	if err == nil && token == "" {
		err = fmt.Errorf("empty token")
	}
	return err
}
// releasecmd creates a new GitHub release for the tag in opt.Release.
// The description may be read from stdin by passing "-" as the desc flag.
// Name and description default to the tag when unset.
func releasecmd(opt Options) error {
	cmdopt := opt.Release
	user := nvls(cmdopt.User, EnvUser)
	repo := nvls(cmdopt.Repo, EnvRepo)
	token := nvls(cmdopt.Token, EnvToken)
	tag := cmdopt.Tag
	name := nvls(cmdopt.Name, tag)
	desc := nvls(cmdopt.Desc, tag)
	target := nvls(cmdopt.Target)
	draft := cmdopt.Draft
	prerelease := cmdopt.Prerelease
	vprintln("releasing...")
	if err := ValidateCredentials(user, repo, token, tag); err != nil {
		return err
	}
	// Check if we need to read the description from stdin.
	if desc == "-" {
		b, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return fmt.Errorf("could not read description from stdin: %v", err)
		}
		desc = string(b)
	}
	params := ReleaseCreate{
		TagName:         tag,
		TargetCommitish: target,
		Name:            name,
		Body:            desc,
		Draft:           draft,
		Prerelease:      prerelease,
	}
	/* encode params as json */
	payload, err := json.Marshal(params)
	if err != nil {
		return fmt.Errorf("can't encode release creation params, %v", err)
	}
	reader := bytes.NewReader(payload)
	URL := nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf("/repos/%s/%s/releases", user, repo)
	resp, err := github.DoAuthRequest("POST", URL, "application/json", token, nil, reader)
	if err != nil {
		return fmt.Errorf("while submitting %v, %v", string(payload), err)
	}
	defer resp.Body.Close()
	vprintln("RESPONSE:", resp)
	if resp.StatusCode != http.StatusCreated {
		// Idiom: use the named constant instead of the magic number 422.
		// 422 Unprocessable Entity typically means the release exists.
		if resp.StatusCode == http.StatusUnprocessableEntity {
			return fmt.Errorf("github returned %v (this is probably because the release already exists)",
				resp.Status)
		}
		return fmt.Errorf("github returned %v", resp.Status)
	}
	if VERBOSITY != 0 {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("error while reading response, %v", err)
		}
		vprintln("BODY:", string(body))
	}
	return nil
}
// editcmd updates an existing release (name, description, draft and
// prerelease flags) identified by the tag in opt.Edit. The description
// may be read from stdin by passing "-" as the desc flag.
func editcmd(opt Options) error {
	cmdopt := opt.Edit
	user := nvls(cmdopt.User, EnvUser)
	authUser := nvls(cmdopt.AuthUser, EnvAuthUser)
	repo := nvls(cmdopt.Repo, EnvRepo)
	token := nvls(cmdopt.Token, EnvToken)
	tag := cmdopt.Tag
	name := nvls(cmdopt.Name, tag)
	desc := nvls(cmdopt.Desc, tag)
	draft := cmdopt.Draft
	prerelease := cmdopt.Prerelease
	vprintln("editing...")
	if err := ValidateCredentials(user, repo, token, tag); err != nil {
		return err
	}
	id, err := IdOfTag(user, repo, tag, authUser, token)
	if err != nil {
		return err
	}
	vprintf("release %v has id %v\n", tag, id)
	// Check if we need to read the description from stdin.
	if desc == "-" {
		b, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return fmt.Errorf("could not read description from stdin: %v", err)
		}
		desc = string(b)
	}
	/* the release create struct works for editing releases as well */
	params := ReleaseCreate{
		TagName:    tag,
		Name:       name,
		Body:       desc,
		Draft:      draft,
		Prerelease: prerelease,
	}
	/* encode the parameters as JSON, as required by the github API */
	payload, err := json.Marshal(params)
	if err != nil {
		return fmt.Errorf("can't encode release creation params, %v", err)
	}
	URL := nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf("/repos/%s/%s/releases/%d", user, repo, id)
	resp, err := github.DoAuthRequest("PATCH", URL, "application/json", token, nil, bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("while submitting %v, %v", string(payload), err)
	}
	defer resp.Body.Close()
	vprintln("RESPONSE:", resp)
	if resp.StatusCode != http.StatusOK {
		// Idiom: use the named constant instead of the magic number 422.
		if resp.StatusCode == http.StatusUnprocessableEntity {
			return fmt.Errorf("github returned %v (this is probably because the release already exists)",
				resp.Status)
		}
		return fmt.Errorf("github returned unexpected status code %v", resp.Status)
	}
	if VERBOSITY != 0 {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("error while reading response, %v", err)
		}
		vprintln("BODY:", string(body))
	}
	return nil
}
// deletecmd removes the release associated with the tag in opt.Delete.
// GitHub signals success with 204 No Content.
func deletecmd(opt Options) error {
	user := nvls(opt.Delete.User, EnvUser)
	repo := nvls(opt.Delete.Repo, EnvRepo)
	token := nvls(opt.Delete.Token, EnvToken)
	tag := opt.Delete.Tag
	authUser := nvls(opt.Delete.AuthUser, EnvAuthUser)
	vprintln("deleting...")
	id, err := IdOfTag(user, repo, tag, authUser, token)
	if err != nil {
		return err
	}
	vprintf("release %v has id %v\n", tag, id)
	endpoint := nvls(EnvApiEndpoint, github.DefaultBaseURL) +
		fmt.Sprintf("/repos/%s/%s/releases/%d", user, repo, id)
	resp, err := github.DoAuthRequest("DELETE", endpoint, "application/json", token, nil, nil)
	if err != nil {
		return fmt.Errorf("release deletion failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("could not delete the release corresponding to tag %s on repo %s/%s",
			tag, user, repo)
	}
	return nil
}

Some files were not shown because too many files have changed in this diff Show More