diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000000..b1fc51b0236
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,87 @@
+###############################################################################
+# Set default behavior to automatically normalize line endings.
+###############################################################################
+* text=auto
+
+# Shell scripts should always use line feed not crlf
+*.sh text eol=lf
+
+###############################################################################
+# Set default behavior for command prompt diff.
+#
+# This is needed for earlier builds of msysgit that do not have it on by
+# default for csharp files.
+# Note: This is only used by command line
+###############################################################################
+#*.cs diff=csharp
+
+###############################################################################
+# Set the merge driver for project and solution files
+#
+# Merging from the command prompt will add diff markers to the files if there
+# are conflicts (Merging from VS is not affected by the settings below, in VS
+# the diff markers are never inserted). Diff markers may cause the following
+# file extensions to fail to load in VS. An alternative would be to treat
+# these files as binary and thus will always conflict and require user
+# intervention with every merge. To do so, just uncomment the entries below
+###############################################################################
+*.js text
+*.json text
+*.resjson text
+*.htm text
+*.html text
+*.xml text
+*.txt text
+*.ini text
+*.inc text
+#*.sln merge=binary
+#*.csproj merge=binary
+#*.vbproj merge=binary
+#*.vcxproj merge=binary
+#*.vcproj merge=binary
+#*.dbproj merge=binary
+#*.fsproj merge=binary
+#*.lsproj merge=binary
+#*.wixproj merge=binary
+#*.modelproj merge=binary
+#*.sqlproj merge=binary
+#*.wwaproj merge=binary
+
+###############################################################################
+# behavior for image files
+#
+# image files are treated as binary by default.
+###############################################################################
+*.png binary
+*.jpg binary
+*.jpeg binary
+*.gif binary
+*.ico binary
+*.mov binary
+*.mp4 binary
+*.mp3 binary
+*.flv binary
+*.fla binary
+*.swf binary
+*.gz binary
+*.zip binary
+*.7z binary
+*.ttf binary
+
+###############################################################################
+# diff behavior for common document formats
+#
+# Convert binary document formats to text before diffing them. This feature
+# is only available from the command line. Turn it on by uncommenting the
+# entries below.
+###############################################################################
+*.doc diff=astextplain
+*.DOC diff=astextplain
+*.docx diff=astextplain
+*.DOCX diff=astextplain
+*.dot diff=astextplain
+*.DOT diff=astextplain
+*.pdf diff=astextplain
+*.PDF diff=astextplain
+*.rtf diff=astextplain
+*.RTF diff=astextplain
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 00000000000..13d002d9a05
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,10 @@
+## Runner Version and Platform
+Version of your runner?
+
+OS of the machine running the runner? OSX/Windows/Linux/...
+
+## What's not working?
+Please include error messages and screenshots.
+
+## Runner and Worker's Diagnostic Logs
+Logs are located in the runner's `_diag` folder. The runner logs are prefixed with `Runner_` and the worker logs are prefixed with `Worker_`. All sensitive information should already be masked out, but please double-check before pasting here.
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 00000000000..52fca6c9ae6
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,52 @@
+name: Runner CI
+
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+ branches:
+ - '*'
+
+jobs:
+ build:
+ strategy:
+ matrix:
+ os: [ubuntu-latest, windows-latest, macOS-latest]
+ include:
+ - os: ubuntu-latest
+ devScript: ./dev.sh
+ - os: macOS-latest
+ devScript: ./dev.sh
+ - os: windows-latest
+ devScript: dev.cmd
+ runs-on: ${{ matrix.os }}
+ steps:
+ - uses: actions/checkout@v1
+
+ # Build runner layout
+ - name: Build & Layout Release
+ run: |
+ ${{ matrix.devScript }} layout Release
+ working-directory: src
+
+ # Run tests
+ - name: L0
+ run: |
+ ${{ matrix.devScript }} test
+ working-directory: src
+
+ # Create runner package tar.gz/zip
+ - name: Package Release
+ if: github.event_name != 'pull_request'
+ run: |
+ ${{ matrix.devScript }} package Release
+ working-directory: src
+
+ # Upload runner package tar.gz/zip as artifact
+ - name: Publish Artifact
+ if: github.event_name != 'pull_request'
+ uses: actions/upload-artifact@v1
+ with:
+ name: runner-package-${{ matrix.os }}
+ path: _package
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000000..3ce91e72ed5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,23 @@
+**/bin
+**/obj
+**/libs
+**/*.xproj
+**/*.xproj.user
+**/*.sln
+**/.vs
+**/.vscode
+**/*.error
+**/*.json.pretty
+node_modules
+_downloads
+_layout
+_package
+_dotnetsdk
+TestResults
+TestLogs
+.DS_Store
+**/*.DotSettings.user
+
+#generated
+src/Runner.Sdk/BuildConstants.cs
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000000..fd74dc7cb92
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright (c) Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 00000000000..f586f61b74f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,31 @@
+# GitHub Actions Runner
+
+
+
+
+
+[![Actions Status](https://github.com/actions/runner/workflows/Runner%20CI/badge.svg)](https://github.com/actions/runner/actions)
+
+## Get Started
+
+![win](docs/res/win_sm.png) [Pre-reqs](docs/start/envwin.md) | [Download](https://github.com/actions/runner/releases/latest)
+
+![macOS](docs/res/apple_sm.png) [Pre-reqs](docs/start/envosx.md) | [Download](https://github.com/actions/runner/releases/latest)
+
+![linux](docs/res/linux_sm.png) [Pre-reqs](docs/start/envlinux.md) | [Download](https://github.com/actions/runner/releases/latest)
+
+**Configure:**
+
+*MacOS and Linux*
+```bash
+./config.sh
+```
+
+*Windows*
+```bash
+config.cmd
+```
+
+## Contribute
+
+For developers that want to contribute, [read here](docs/contribute.md) on how to build and test.
diff --git a/assets.json b/assets.json
new file mode 100644
index 00000000000..a6cfba2f00b
--- /dev/null
+++ b/assets.json
@@ -0,0 +1,20 @@
+[
+ {
+ "name": "actions-runner-win-x64-.zip",
+ "platform": "win-x64",
+ "version": "",
+ "downloadUrl": "https://githubassets.azureedge.net/runners//actions-runner-win-x64-.zip"
+ },
+ {
+ "name": "actions-runner-osx-x64-.tar.gz",
+ "platform": "osx-x64",
+ "version": "",
+ "downloadUrl": "https://githubassets.azureedge.net/runners//actions-runner-osx-x64-.tar.gz"
+ },
+ {
+ "name": "actions-runner-linux-x64-.tar.gz",
+ "platform": "linux-x64",
+ "version": "",
+ "downloadUrl": "https://githubassets.azureedge.net/runners//actions-runner-linux-x64-.tar.gz"
+ }
+]
\ No newline at end of file
diff --git a/azure-pipelines-release.yml b/azure-pipelines-release.yml
new file mode 100644
index 00000000000..25ea0aa2afc
--- /dev/null
+++ b/azure-pipelines-release.yml
@@ -0,0 +1,177 @@
+stages:
+- stage: Build
+ jobs:
+ ################################################################################
+ - job: build_windows_agent_x64
+ ################################################################################
+ displayName: Windows Agent (x64)
+ pool:
+ vmImage: vs2017-win2016
+ steps:
+
+ # Steps template for windows platform
+ - template: windows.template.yml
+
+ # Package dotnet core windows dependency (VC++ Redistributable)
+ - powershell: |
+ Write-Host "Downloading 'VC++ Redistributable' package."
+ $outDir = Join-Path -Path $env:TMP -ChildPath ([Guid]::NewGuid())
+ New-Item -Path $outDir -ItemType directory
+ $outFile = Join-Path -Path $outDir -ChildPath "ucrt.zip"
+ Invoke-WebRequest -Uri https://vstsagenttools.blob.core.windows.net/tools/ucrt/ucrt_x64.zip -OutFile $outFile
+ Write-Host "Unzipping 'VC++ Redistributable' package to agent layout."
+ $unzipDir = Join-Path -Path $outDir -ChildPath "unzip"
+ Add-Type -AssemblyName System.IO.Compression.FileSystem
+ [System.IO.Compression.ZipFile]::ExtractToDirectory($outFile, $unzipDir)
+ $agentLayoutBin = Join-Path -Path $(Build.SourcesDirectory) -ChildPath "_layout\bin"
+ Copy-Item -Path $unzipDir -Destination $agentLayoutBin -Force
+ displayName: Package UCRT
+
+ # Create agent package zip
+ - script: dev.cmd package Release
+ workingDirectory: src
+ displayName: Package Release
+
+ # Upload agent package zip as build artifact
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Artifact (Windows)
+ inputs:
+ pathToPublish: _package
+ artifactName: runners
+ artifactType: container
+
+ ################################################################################
+ - job: build_linux_agent_x64
+ ################################################################################
+ displayName: Linux Agent (x64)
+ pool:
+ vmImage: ubuntu-16.04
+ steps:
+
+ # Steps template for non-windows platform
+ - template: nonwindows.template.yml
+
+ # Create agent package zip
+ - script: ./dev.sh package Release
+ workingDirectory: src
+ displayName: Package Release
+
+ # Upload agent package zip as build artifact
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Artifact (Linux)
+ inputs:
+ pathToPublish: _package
+ artifactName: runners
+ artifactType: container
+
+ ################################################################################
+ - job: build_osx_agent
+ ################################################################################
+ displayName: macOS Agent (x64)
+ pool:
+ vmImage: macOS-10.13
+ steps:
+
+ # Steps template for non-windows platform
+ - template: nonwindows.template.yml
+
+ # Create agent package zip
+ - script: ./dev.sh package Release
+ workingDirectory: src
+ displayName: Package Release
+
+ # Upload agent package zip as build artifact
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Artifact (OSX)
+ inputs:
+ pathToPublish: _package
+ artifactName: runners
+ artifactType: container
+
+- stage: Release
+ dependsOn: Build
+ jobs:
+ ################################################################################
+ - job: publish_agent_packages
+ ################################################################################
+ displayName: Publish Agents (Windows/Linux/OSX)
+ pool:
+ name: ProductionRMAgents
+ steps:
+
+ # Download all agent packages from all previous phases
+ - task: DownloadBuildArtifacts@0
+ displayName: Download Agent Packages
+ inputs:
+ artifactName: runners
+
+ # Upload agent packages to Azure blob storage and refresh Azure CDN
+ - powershell: |
+ Write-Host "Preloading Azure modules." # This is for better performance, to avoid module-autoloading.
+ Import-Module AzureRM, AzureRM.profile, AzureRM.Storage, Azure.Storage, AzureRM.Cdn -ErrorAction Ignore -PassThru
+ Enable-AzureRmAlias -Scope CurrentUser
+ $uploadFiles = New-Object System.Collections.ArrayList
+ $certificateThumbprint = (Get-ItemProperty -Path "$(ServicePrincipalReg)").ServicePrincipalCertThumbprint
+ $clientId = (Get-ItemProperty -Path "$(ServicePrincipalReg)").ServicePrincipalClientId
+ Write-Host "##vso[task.setsecret]$certificateThumbprint"
+ Write-Host "##vso[task.setsecret]$clientId"
+ Login-AzureRmAccount -ServicePrincipal -CertificateThumbprint $certificateThumbprint -ApplicationId $clientId -TenantId $(GitHubTenantId)
+ Select-AzureRmSubscription -SubscriptionId $(GitHubSubscriptionId)
+ $storage = Get-AzureRmStorageAccount -ResourceGroupName githubassets -AccountName githubassets
+ Get-ChildItem -LiteralPath "$(System.ArtifactsDirectory)/runners" | ForEach-Object {
+ $versionDir = $_.Name.Trim('.zip').Trim('.tar.gz')
+ $versionDir = $versionDir.SubString($versionDir.LastIndexOf('-') + 1)
+ Write-Host "##vso[task.setvariable variable=ReleaseAgentVersion;]$versionDir"
+ Write-Host "Uploading $_ to BlobStorage githubassets/runners/$versionDir"
+ Set-AzureStorageBlobContent -Context $storage.Context -Container runners -File "$(System.ArtifactsDirectory)/runners/$_" -Blob "$versionDir/$_" -Force
+ $uploadFiles.Add("/runners/$versionDir/$_")
+ }
+ Write-Host "Get CDN info"
+ Get-AzureRmCdnEndpoint -ProfileName githubassets -ResourceGroupName githubassets
+ Write-Host "Purge Azure CDN Cache"
+ Unpublish-AzureRmCdnEndpointContent -EndpointName githubassets -ProfileName githubassets -ResourceGroupName githubassets -PurgeContent $uploadFiles
+ Write-Host "Pull assets through Azure CDN"
+ $uploadFiles | ForEach-Object {
+ $downloadUrl = "https://githubassets.azureedge.net" + $_
+ Write-Host $downloadUrl
+ Invoke-WebRequest -Uri $downloadUrl -OutFile $_.SubString($_.LastIndexOf('/') + 1)
+ }
+ displayName: Upload to Azure Blob
+
+ # Create agent release on Github
+ - powershell: |
+ Write-Host "Creating github release."
+ $releaseNotes = [System.IO.File]::ReadAllText("$(Build.SourcesDirectory)\releaseNote.md").Replace("","$(ReleaseAgentVersion)")
+ $releaseData = @{
+ tag_name = "v$(ReleaseAgentVersion)";
+ target_commitish = "$(Build.SourceVersion)";
+ name = "v$(ReleaseAgentVersion)";
+ body = $releaseNotes;
+ draft = $false;
+ prerelease = $true;
+ }
+ $releaseParams = @{
+ Uri = "https://api.github.com/repos/actions/runner/releases";
+ Method = 'POST';
+ Headers = @{
+ Authorization = 'Basic ' + [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes("github:$(GithubToken)"));
+ }
+ ContentType = 'application/json';
+ Body = (ConvertTo-Json $releaseData -Compress)
+ }
+ [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
+ $releaseCreated = Invoke-RestMethod @releaseParams
+ Write-Host $releaseCreated
+ $releaseId = $releaseCreated.id
+ $assets = [System.IO.File]::ReadAllText("$(Build.SourcesDirectory)\assets.json").Replace("","$(ReleaseAgentVersion)")
+ $assetsParams = @{
+ Uri = "https://uploads.github.com/repos/actions/runner/releases/$releaseId/assets?name=assets.json"
+ Method = 'POST';
+ Headers = @{
+ Authorization = 'Basic ' + [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes("github:$(GithubToken)"));
+ }
+ ContentType = 'application/octet-stream';
+ Body = [system.Text.Encoding]::UTF8.GetBytes($assets)
+ }
+ Invoke-RestMethod @assetsParams
+ displayName: Create agent release on Github
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
new file mode 100644
index 00000000000..8706b1fb89c
--- /dev/null
+++ b/azure-pipelines.yml
@@ -0,0 +1,95 @@
+jobs:
+
+################################################################################
+- job: build_windows_x64_agent
+################################################################################
+ displayName: Windows Agent (x64)
+ pool:
+ vmImage: vs2017-win2016
+ steps:
+
+ # Steps template for windows platform
+ - template: windows.template.yml
+
+ # Package dotnet core windows dependency (VC++ Redistributable)
+ - powershell: |
+ Write-Host "Downloading 'VC++ Redistributable' package."
+ $outDir = Join-Path -Path $env:TMP -ChildPath ([Guid]::NewGuid())
+ New-Item -Path $outDir -ItemType directory
+ $outFile = Join-Path -Path $outDir -ChildPath "ucrt.zip"
+ Invoke-WebRequest -Uri https://vstsagenttools.blob.core.windows.net/tools/ucrt/ucrt_x64.zip -OutFile $outFile
+ Write-Host "Unzipping 'VC++ Redistributable' package to agent layout."
+ $unzipDir = Join-Path -Path $outDir -ChildPath "unzip"
+ Add-Type -AssemblyName System.IO.Compression.FileSystem
+ [System.IO.Compression.ZipFile]::ExtractToDirectory($outFile, $unzipDir)
+ $agentLayoutBin = Join-Path -Path $(Build.SourcesDirectory) -ChildPath "_layout\bin"
+ Copy-Item -Path $unzipDir -Destination $agentLayoutBin -Force
+ displayName: Package UCRT
+ condition: and(succeeded(), ne(variables['build.reason'], 'PullRequest'))
+
+ # Create agent package zip
+ - script: dev.cmd package Release
+ workingDirectory: src
+ displayName: Package Release
+ condition: and(succeeded(), ne(variables['build.reason'], 'PullRequest'))
+
+ # Upload agent package zip as build artifact
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Artifact (Windows x64)
+ condition: and(succeeded(), ne(variables['build.reason'], 'PullRequest'))
+ inputs:
+ pathToPublish: _package
+ artifactName: agent
+ artifactType: container
+
+################################################################################
+- job: build_linux_x64_agent
+################################################################################
+ displayName: Linux Agent (x64)
+ pool:
+ vmImage: ubuntu-16.04
+ steps:
+
+ # Steps template for non-windows platform
+ - template: nonwindows.template.yml
+
+ # Create agent package zip
+ - script: ./dev.sh package Release
+ workingDirectory: src
+ displayName: Package Release
+ condition: and(succeeded(), ne(variables['build.reason'], 'PullRequest'))
+
+ # Upload agent package zip as build artifact
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Artifact (Linux x64)
+ condition: and(succeeded(), ne(variables['build.reason'], 'PullRequest'))
+ inputs:
+ pathToPublish: _package
+ artifactName: agent
+ artifactType: container
+
+################################################################################
+- job: build_osx_agent
+################################################################################
+ displayName: macOS Agent (x64)
+ pool:
+ vmImage: macOS-10.14
+ steps:
+
+ # Steps template for non-windows platform
+ - template: nonwindows.template.yml
+
+ # Create agent package zip
+ - script: ./dev.sh package Release
+ workingDirectory: src
+ displayName: Package Release
+ condition: and(succeeded(), ne(variables['build.reason'], 'PullRequest'))
+
+ # Upload agent package zip as build artifact
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Artifact (OSX)
+ condition: and(succeeded(), ne(variables['build.reason'], 'PullRequest'))
+ inputs:
+ pathToPublish: _package
+ artifactName: agent
+ artifactType: container
diff --git a/docs/contribute.md b/docs/contribute.md
new file mode 100644
index 00000000000..4546f21352a
--- /dev/null
+++ b/docs/contribute.md
@@ -0,0 +1,41 @@
+# Contribute (Dev)
+
+## Dev Dependencies
+
+![Win](res/win_sm.png) Git for Windows [Install Here](https://git-scm.com/downloads) (needed for dev sh script)
+
+## Build, Test, Layout
+
+From src:
+
+![Win](res/win_sm.png) `dev {command}`
+
+![*nix](res/linux_sm.png) `./dev.sh {command}`
+
+**Commands:**
+
+`layout` (`l`): Run first time to create a full agent layout in {root}/_layout
+
+`build` (`b`): build everything and update agent layout folder
+
+`test` (`t`): build agent binaries and run unit tests
+
+Normal dev flow:
+```bash
+git clone https://github.com/actions/runner
+cd ./src
+./dev.(sh/cmd) layout # the agent built from source is in {root}/_layout
+
+./dev.(sh/cmd) build # {root}/_layout will get updated
+./dev.(sh/cmd) test # run all unit tests before git commit/push
+```
+
+## Editors
+
+[Using Visual Studio 2019](https://www.visualstudio.com/vs/)
+[Using Visual Studio Code](https://code.visualstudio.com/)
+
+## Styling
+
+We use the dotnet foundation and CoreCLR style guidelines [located here](
+https://github.com/dotnet/corefx/blob/master/Documentation/coding-guidelines/coding-style.md)
diff --git a/docs/res/apple_med.png b/docs/res/apple_med.png
new file mode 100644
index 00000000000..467a59ae488
Binary files /dev/null and b/docs/res/apple_med.png differ
diff --git a/docs/res/apple_sm.png b/docs/res/apple_sm.png
new file mode 100644
index 00000000000..5edf774da51
Binary files /dev/null and b/docs/res/apple_sm.png differ
diff --git a/docs/res/github-graph.png b/docs/res/github-graph.png
new file mode 100644
index 00000000000..70fe21d0270
Binary files /dev/null and b/docs/res/github-graph.png differ
diff --git a/docs/res/linux_med.png b/docs/res/linux_med.png
new file mode 100644
index 00000000000..905a7076bfc
Binary files /dev/null and b/docs/res/linux_med.png differ
diff --git a/docs/res/linux_sm.png b/docs/res/linux_sm.png
new file mode 100644
index 00000000000..a71f801d6b6
Binary files /dev/null and b/docs/res/linux_sm.png differ
diff --git a/docs/res/redhat_med.png b/docs/res/redhat_med.png
new file mode 100644
index 00000000000..4a869a51436
Binary files /dev/null and b/docs/res/redhat_med.png differ
diff --git a/docs/res/redhat_sm.png b/docs/res/redhat_sm.png
new file mode 100644
index 00000000000..d35f47b6962
Binary files /dev/null and b/docs/res/redhat_sm.png differ
diff --git a/docs/res/ubuntu_med.png b/docs/res/ubuntu_med.png
new file mode 100644
index 00000000000..2901c709d7f
Binary files /dev/null and b/docs/res/ubuntu_med.png differ
diff --git a/docs/res/ubuntu_sm.png b/docs/res/ubuntu_sm.png
new file mode 100644
index 00000000000..c93022f11e1
Binary files /dev/null and b/docs/res/ubuntu_sm.png differ
diff --git a/docs/res/win_med.png b/docs/res/win_med.png
new file mode 100644
index 00000000000..18d560fe0e6
Binary files /dev/null and b/docs/res/win_med.png differ
diff --git a/docs/res/win_sm.png b/docs/res/win_sm.png
new file mode 100644
index 00000000000..73d134a4f51
Binary files /dev/null and b/docs/res/win_sm.png differ
diff --git a/docs/start/envlinux.md b/docs/start/envlinux.md
new file mode 100644
index 00000000000..ddc87bd12a9
--- /dev/null
+++ b/docs/start/envlinux.md
@@ -0,0 +1,40 @@
+
+
+# ![Linux](../res/linux_med.png) Linux System Prerequisites
+
+## Supported Distributions and Versions
+
+x64
+ - Red Hat Enterprise Linux 6 (see note 1), 7
+ - CentOS 6 (see note 1), 7
+ - Oracle Linux 7
+ - Fedora 28, 27
+ - Debian 9, 8.7 or later versions
+ - Ubuntu 18.04, Ubuntu 16.04, Ubuntu 14.04
+ - Linux Mint 18, 17
+ - openSUSE 42.3 or later versions
+ - SUSE Enterprise Linux (SLES) 12 SP2 or later versions
+
+ARM32 (see note 2)
+ - Debian 9 or later versions
+ - Ubuntu 18.04 or later versions
+
+> Note 1: Red Hat Enterprise Linux 6 and CentOS 6 require installing the specialized "rhel.6-x64" agent package
+> Note 2: ARM instruction set [ARMv7](https://en.wikipedia.org/wiki/List_of_ARM_microarchitectures) or above is required, you can get your device's information by executing `uname -a`
+
+## Install .Net Core 2.x Linux Dependencies
+
+The `./config.sh` will check .Net Core 2.x dependencies during agent configuration.
+You might see something like this, which indicates a dependency is missing.
+```bash
+./config.sh
+ libunwind.so.8 => not found
+ libunwind-x86_64.so.8 => not found
+Dependencies is missing for Dotnet Core 2.1
+Execute ./bin/installdependencies.sh to install any missing Dotnet Core 2.1 dependencies.
+```
+You can easily correct the problem by executing `./bin/installdependencies.sh`.
+The `installdependencies.sh` script should install all required dependencies on all supported Linux versions.
+> Note: The `installdependencies.sh` script will try to use the default package management mechanism on your Linux flavor (ex. `yum`/`apt-get`/`apt`). You might need to deal with errors coming from the package management mechanism related to your setup, like [#1353](https://github.com/Microsoft/vsts-agent/issues/1353)
+
+## [More .Net Core Prerequisites Information](https://docs.microsoft.com/en-us/dotnet/core/linux-prerequisites?tabs=netcore2x)
diff --git a/docs/start/envosx.md b/docs/start/envosx.md
new file mode 100644
index 00000000000..13969600a25
--- /dev/null
+++ b/docs/start/envosx.md
@@ -0,0 +1,10 @@
+
+
+# ![osx](../res/apple_med.png) macOS/OS X System Prerequisites
+
+## Supported Versions
+
+ - macOS Sierra (10.12) and later versions
+
+
+## [More .Net Core Prerequisites Information](https://docs.microsoft.com/en-us/dotnet/core/macos-prerequisites?tabs=netcore2x)
diff --git a/docs/start/envwin.md b/docs/start/envwin.md
new file mode 100644
index 00000000000..4dd316990ef
--- /dev/null
+++ b/docs/start/envwin.md
@@ -0,0 +1,12 @@
+# ![win](../res/win_med.png) Windows System Prerequisites
+
+## Supported Versions
+
+ - Windows 7 64-bit
+ - Windows 8.1 64-bit
+ - Windows 10 64-bit
+ - Windows Server 2008 R2 SP1 64-bit
+ - Windows Server 2012 R2 64-bit
+ - Windows Server 2016 64-bit
+
+## [More .Net Core Prerequisites Information](https://docs.microsoft.com/en-us/dotnet/core/windows-prerequisites?tabs=netcore2x)
diff --git a/images/arm/Dockerfile b/images/arm/Dockerfile
new file mode 100644
index 00000000000..233468fd045
--- /dev/null
+++ b/images/arm/Dockerfile
@@ -0,0 +1,7 @@
+FROM mcr.microsoft.com/dotnet/core/runtime-deps:2.1
+
+RUN apt-get update \
+ && apt-get install -y --no-install-recommends \
+ curl \
+ git \
+ && rm -rf /var/lib/apt/lists/*
diff --git a/images/centos6/Dockerfile b/images/centos6/Dockerfile
new file mode 100644
index 00000000000..8270e68b987
--- /dev/null
+++ b/images/centos6/Dockerfile
@@ -0,0 +1,150 @@
+FROM centos:6
+
+# Install dependencies
+
+RUN yum install -y \
+ centos-release-SCL \
+ epel-release \
+ wget \
+ unzip \
+ && \
+ rpm --import http://linuxsoft.cern.ch/cern/slc6X/x86_64/RPM-GPG-KEY-cern && \
+ wget -O /etc/yum.repos.d/slc6-devtoolset.repo http://linuxsoft.cern.ch/cern/devtoolset/slc6-devtoolset.repo && \
+ yum install -y \
+ "perl(Time::HiRes)" \
+ autoconf \
+ cmake \
+ cmake3 \
+ devtoolset-2-toolchain \
+ doxygen \
+ expat-devel \
+ gcc \
+ gcc-c++ \
+ gdb \
+ gettext-devel \
+ krb5-devel \
+ libedit-devel \
+ libidn-devel \
+ libmetalink-devel \
+ libnghttp2-devel \
+ libssh2-devel \
+ libunwind-devel \
+ libuuid-devel \
+ lttng-ust-devel \
+ lzma \
+ ncurses-devel \
+ openssl-devel \
+ perl-devel \
+ python-argparse \
+ python27 \
+ readline-devel \
+ swig \
+ xz \
+ zlib-devel \
+ && \
+ yum clean all
+
+# Build and install clang and lldb 3.9.1
+
+RUN wget ftp://sourceware.org/pub/binutils/snapshots/binutils-2.29.1.tar.xz && \
+ wget http://releases.llvm.org/3.9.1/cfe-3.9.1.src.tar.xz && \
+ wget http://releases.llvm.org/3.9.1/llvm-3.9.1.src.tar.xz && \
+ wget http://releases.llvm.org/3.9.1/lldb-3.9.1.src.tar.xz && \
+ wget http://releases.llvm.org/3.9.1/compiler-rt-3.9.1.src.tar.xz && \
+ \
+ tar -xf binutils-2.29.1.tar.xz && \
+ tar -xf llvm-3.9.1.src.tar.xz && \
+ mkdir llvm-3.9.1.src/tools/clang && \
+ mkdir llvm-3.9.1.src/tools/lldb && \
+ mkdir llvm-3.9.1.src/projects/compiler-rt && \
+ tar -xf cfe-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/tools/clang && \
+ tar -xf lldb-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/tools/lldb && \
+ tar -xf compiler-rt-3.9.1.src.tar.xz --strip 1 -C llvm-3.9.1.src/projects/compiler-rt && \
+ rm binutils-2.29.1.tar.xz && \
+ rm cfe-3.9.1.src.tar.xz && \
+ rm lldb-3.9.1.src.tar.xz && \
+ rm llvm-3.9.1.src.tar.xz && \
+ rm compiler-rt-3.9.1.src.tar.xz && \
+ \
+ mkdir llvmbuild && \
+ cd llvmbuild && \
+ scl enable python27 devtoolset-2 \
+ ' \
+ cmake3 \
+ -DCMAKE_CXX_COMPILER=/opt/rh/devtoolset-2/root/usr/bin/g++ \
+ -DCMAKE_C_COMPILER=/opt/rh/devtoolset-2/root/usr/bin/gcc \
+ -DCMAKE_LINKER=/opt/rh/devtoolset-2/root/usr/bin/ld \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DLLVM_LIBDIR_SUFFIX=64 \
+ -DLLVM_ENABLE_EH=1 \
+ -DLLVM_ENABLE_RTTI=1 \
+ -DLLVM_BINUTILS_INCDIR=../binutils-2.29.1/include \
+ ../llvm-3.9.1.src \
+ && \
+ make -j $(($(getconf _NPROCESSORS_ONLN)+1)) && \
+ make install \
+ ' && \
+ cd .. && \
+ rm -r llvmbuild && \
+ rm -r llvm-3.9.1.src && \
+ rm -r binutils-2.29.1
+
+# Build and install curl 7.45.0
+
+RUN wget https://curl.haxx.se/download/curl-7.45.0.tar.lzma && \
+ tar -xf curl-7.45.0.tar.lzma && \
+ rm curl-7.45.0.tar.lzma && \
+ cd curl-7.45.0 && \
+ scl enable python27 devtoolset-2 \
+ ' \
+ ./configure \
+ --disable-dict \
+ --disable-ftp \
+ --disable-gopher \
+ --disable-imap \
+ --disable-ldap \
+ --disable-ldaps \
+ --disable-libcurl-option \
+ --disable-manual \
+ --disable-pop3 \
+ --disable-rtsp \
+ --disable-smb \
+ --disable-smtp \
+ --disable-telnet \
+ --disable-tftp \
+ --enable-ipv6 \
+ --enable-optimize \
+ --enable-symbol-hiding \
+ --with-ca-bundle=/etc/pki/tls/certs/ca-bundle.crt \
+ --with-nghttp2 \
+ --with-gssapi \
+ --with-ssl \
+ --without-librtmp \
+ && \
+ make install \
+ ' && \
+ cd .. && \
+ rm -r curl-7.45.0
+
+# Install ICU 57.1
+
+RUN wget http://download.icu-project.org/files/icu4c/57.1/icu4c-57_1-RHEL6-x64.tgz && \
+ tar -xf icu4c-57_1-RHEL6-x64.tgz -C / && \
+ rm icu4c-57_1-RHEL6-x64.tgz
+
+# Compile and install a version of the git that supports the features that cli repo build needs
+# NOTE: The git needs to be built after the curl so that it can use the libcurl to add https
+# protocol support.
+RUN \
+ wget https://www.kernel.org/pub/software/scm/git/git-2.9.5.tar.gz && \
+ tar -xf git-2.9.5.tar.gz && \
+ rm git-2.9.5.tar.gz && \
+ cd git-2.9.5 && \
+ make configure && \
+ ./configure --prefix=/usr/local --without-tcltk && \
+ make -j $(nproc --all) all && \
+ make install && \
+ cd .. && \
+ rm -r git-2.9.5
+
+ENV LD_LIBRARY_PATH=/usr/local/lib
diff --git a/nonwindows.template.yml b/nonwindows.template.yml
new file mode 100644
index 00000000000..833bd0500ba
--- /dev/null
+++ b/nonwindows.template.yml
@@ -0,0 +1,29 @@
+steps:
+
+# Build agent layout
+- script: ./dev.sh layout Release
+ workingDirectory: src
+ displayName: Build & Layout Release
+
+# Run test
+- script: ./dev.sh test
+ workingDirectory: src
+ displayName: Test
+
+# # Publish test results
+# - task: PublishTestResults@2
+# displayName: Publish Test Results **/*.trx
+# condition: always()
+# inputs:
+# testRunner: VSTest
+# testResultsFiles: '**/*.trx'
+# testRunTitle: 'Agent Tests'
+
+# # Upload test log
+# - task: PublishBuildArtifacts@1
+# displayName: Publish Test logs
+# condition: always()
+# inputs:
+# pathToPublish: src/Test/TestLogs
+# artifactName: $(System.JobId)
+# artifactType: container
diff --git a/releaseNote.md b/releaseNote.md
new file mode 100644
index 00000000000..e6e2e888239
--- /dev/null
+++ b/releaseNote.md
@@ -0,0 +1,44 @@
+## Features
+ - Runner config auth via GitHub.com. (#107) (#117)
+ - Adding wrapper action to support post job cleanup, adding checkout v1.1 (#91)
+ - Improving terminal experience (#110)
+ - Add runner support for cache action. (#120)
+
+## Bugs
+ - Set GITHUB_ACTIONS in containers. (#119)
+ - Fix issue data column/col mismatch. (#122)
+
+## Misc
+ - Use GitHub actions for CI/PR (#112)
+ - Code Cleanup (#123) (#124) (#125)
+
+## Agent Downloads
+
+| | Package |
+| ------- | ----------------------------------------------------------------------------------------------------------- |
+| Windows x64 | [actions-runner-win-x64-.zip](https://githubassets.azureedge.net/runners//actions-runner-win-x64-.zip) |
+| macOS | [actions-runner-osx-x64-.tar.gz](https://githubassets.azureedge.net/runners//actions-runner-osx-x64-.tar.gz) |
+| Linux x64 | [actions-runner-linux-x64-.tar.gz](https://githubassets.azureedge.net/runners//actions-runner-linux-x64-.tar.gz) |
+
+After Download:
+
+## Windows x64
+
+``` powershell
+C:\> mkdir myagent && cd myagent
+C:\myagent> Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$HOME\Downloads\actions-runner-win-x64-.zip", "$PWD")
+```
+
+## OSX
+
+``` bash
+~/$ mkdir myagent && cd myagent
+~/myagent$ tar xzf ~/Downloads/actions-runner-osx-x64-.tar.gz
+```
+
+## Linux x64
+
+``` bash
+~/$ mkdir myagent && cd myagent
+~/myagent$ tar xzf ~/Downloads/actions-runner-linux-x64-.tar.gz
+```
diff --git a/src/Misc/dotnet-install.ps1 b/src/Misc/dotnet-install.ps1
new file mode 100644
index 00000000000..2ff62da5aaf
--- /dev/null
+++ b/src/Misc/dotnet-install.ps1
@@ -0,0 +1,650 @@
+#
+# Copyright (c) .NET Foundation and contributors. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for full license information.
+#
+
+<#
+.SYNOPSIS
+ Installs dotnet cli
+.DESCRIPTION
+ Installs dotnet cli. If dotnet installation already exists in the given directory
+ it will update it only if the requested version differs from the one already installed.
+.PARAMETER Channel
+ Default: LTS
+ Download from the Channel specified. Possible values:
+ - Current - most current release
+ - LTS - most current supported release
+ - 2-part version in a format A.B - represents a specific release
+ examples: 2.0, 1.0
+ - Branch name
+ examples: release/2.0.0, Master
+ Note: The version parameter overrides the channel parameter.
+.PARAMETER Version
+ Default: latest
+ Represents a build version on specific channel. Possible values:
+ - latest - most latest build on specific channel
+ - coherent - most latest coherent build on specific channel
+ coherent applies only to SDK downloads
+ - 3-part version in a format A.B.C - represents specific version of build
+ examples: 2.0.0-preview2-006120, 1.1.0
+.PARAMETER InstallDir
+ Default: %LocalAppData%\Microsoft\dotnet
+ Path to where to install dotnet. Note that binaries will be placed directly in a given directory.
+.PARAMETER Architecture
+    Default: "" - an empty value represents the currently running OS architecture
+    Architecture of dotnet binaries to be installed.
+    Possible values are: amd64, x64, x86, arm64, arm (empty value = currently running OS architecture)
+.PARAMETER SharedRuntime
+ This parameter is obsolete and may be removed in a future version of this script.
+ The recommended alternative is '-Runtime dotnet'.
+
+ Default: false
+ Installs just the shared runtime bits, not the entire SDK.
+ This is equivalent to specifying `-Runtime dotnet`.
+.PARAMETER Runtime
+ Installs just a shared runtime, not the entire SDK.
+ Possible values:
+ - dotnet - the Microsoft.NETCore.App shared runtime
+ - aspnetcore - the Microsoft.AspNetCore.App shared runtime
+ - windowsdesktop - the Microsoft.WindowsDesktop.App shared runtime
+.PARAMETER DryRun
+ If set it will not perform installation but instead display what command line to use to consistently install
+ currently requested version of dotnet cli. In example if you specify version 'latest' it will display a link
+ with specific version so that this command can be used deterministicly in a build script.
+ It also displays binaries location if you prefer to install or download it yourself.
+.PARAMETER NoPath
+ By default this script will set environment variable PATH for the current process to the binaries folder inside installation folder.
+ If set it will display binaries location but not set any environment variable.
+.PARAMETER Verbose
+ Displays diagnostics information.
+.PARAMETER AzureFeed
+ Default: https://dotnetcli.azureedge.net/dotnet
+ This parameter typically is not changed by the user.
+ It allows changing the URL for the Azure feed used by this installer.
+.PARAMETER UncachedFeed
+ This parameter typically is not changed by the user.
+ It allows changing the URL for the Uncached feed used by this installer.
+.PARAMETER FeedCredential
+ Used as a query string to append to the Azure feed.
+ It allows changing the URL to use non-public blob storage accounts.
+.PARAMETER ProxyAddress
+ If set, the installer will use the proxy when making web requests
+.PARAMETER ProxyUseDefaultCredentials
+ Default: false
+ Use default credentials, when using proxy address.
+.PARAMETER SkipNonVersionedFiles
+ Default: false
+ Skips installing non-versioned files if they already exist, such as dotnet.exe.
+.PARAMETER NoCdn
+ Disable downloading from the Azure CDN, and use the uncached feed directly.
+#>
+[cmdletbinding()]
+param(
+ [string]$Channel="LTS",
+ [string]$Version="Latest",
+ [string]$InstallDir="",
+ [string]$Architecture="",
+ [ValidateSet("dotnet", "aspnetcore", "windowsdesktop", IgnoreCase = $false)]
+ [string]$Runtime,
+ [Obsolete("This parameter may be removed in a future version of this script. The recommended alternative is '-Runtime dotnet'.")]
+ [switch]$SharedRuntime,
+ [switch]$DryRun,
+ [switch]$NoPath,
+ [string]$AzureFeed="https://dotnetcli.azureedge.net/dotnet",
+ [string]$UncachedFeed="https://dotnetcli.blob.core.windows.net/dotnet",
+ [string]$FeedCredential,
+ [string]$ProxyAddress,
+ [switch]$ProxyUseDefaultCredentials,
+ [switch]$SkipNonVersionedFiles,
+ [switch]$NoCdn
+)
+
+Set-StrictMode -Version Latest
+$ErrorActionPreference="Stop"
+$ProgressPreference="SilentlyContinue"
+
+if ($NoCdn) {
+ $AzureFeed = $UncachedFeed
+}
+
+$BinFolderRelativePath=""
+
+if ($SharedRuntime -and (-not $Runtime)) {
+ $Runtime = "dotnet"
+}
+
+# example path with regex: shared/1.0.0-beta-12345/somepath
+$VersionRegEx="/\d+\.\d+[^/]+/"
+$OverrideNonVersionedFiles = !$SkipNonVersionedFiles
+
+function Say($str) {
+ Write-Host "dotnet-install: $str"
+}
+
+function Say-Verbose($str) {
+ Write-Verbose "dotnet-install: $str"
+}
+
+function Say-Invocation($Invocation) {
+ $command = $Invocation.MyCommand;
+ $args = (($Invocation.BoundParameters.Keys | foreach { "-$_ `"$($Invocation.BoundParameters[$_])`"" }) -join " ")
+ Say-Verbose "$command $args"
+}
+
+function Invoke-With-Retry([ScriptBlock]$ScriptBlock, [int]$MaxAttempts = 3, [int]$SecondsBetweenAttempts = 1) {
+ $Attempts = 0
+
+ while ($true) {
+ try {
+ return $ScriptBlock.Invoke()
+ }
+ catch {
+ $Attempts++
+ if ($Attempts -lt $MaxAttempts) {
+ Start-Sleep $SecondsBetweenAttempts
+ }
+ else {
+ throw
+ }
+ }
+ }
+}
+
+function Get-Machine-Architecture() {
+ Say-Invocation $MyInvocation
+
+ # possible values: amd64, x64, x86, arm64, arm
+ return $ENV:PROCESSOR_ARCHITECTURE
+}
+
+function Get-CLIArchitecture-From-Architecture([string]$Architecture) {
+ Say-Invocation $MyInvocation
+
+ switch ($Architecture.ToLower()) {
+ { $_ -eq "" } { return Get-CLIArchitecture-From-Architecture $(Get-Machine-Architecture) }
+ { ($_ -eq "amd64") -or ($_ -eq "x64") } { return "x64" }
+ { $_ -eq "x86" } { return "x86" }
+ { $_ -eq "arm" } { return "arm" }
+ { $_ -eq "arm64" } { return "arm64" }
+ default { throw "Architecture not supported. If you think this is a bug, report it at https://github.com/dotnet/cli/issues" }
+ }
+}
+
+# The version text returned from the feeds is a 1-line or 2-line string:
+# For the SDK and the dotnet runtime (2 lines):
+# Line 1: # commit_hash
+# Line 2: # 4-part version
+# For the aspnetcore runtime (1 line):
+# Line 1: # 4-part version
+function Get-Version-Info-From-Version-Text([string]$VersionText) {
+ Say-Invocation $MyInvocation
+
+ $Data = -split $VersionText
+
+ $VersionInfo = @{
+ CommitHash = $(if ($Data.Count -gt 1) { $Data[0] })
+ Version = $Data[-1] # last line is always the version number.
+ }
+ return $VersionInfo
+}
+
+function Load-Assembly([string] $Assembly) {
+ try {
+ Add-Type -Assembly $Assembly | Out-Null
+ }
+ catch {
+ # On Nano Server, Powershell Core Edition is used. Add-Type is unable to resolve base class assemblies because they are not GAC'd.
+        # Loading the base class assemblies is not necessary as the types will automatically get resolved.
+ }
+}
+
+function GetHTTPResponse([Uri] $Uri)
+{
+ Invoke-With-Retry(
+ {
+
+ $HttpClient = $null
+
+ try {
+ # HttpClient is used vs Invoke-WebRequest in order to support Nano Server which doesn't support the Invoke-WebRequest cmdlet.
+ Load-Assembly -Assembly System.Net.Http
+
+ if(-not $ProxyAddress) {
+ try {
+ # Despite no proxy being explicitly specified, we may still be behind a default proxy
+ $DefaultProxy = [System.Net.WebRequest]::DefaultWebProxy;
+ if($DefaultProxy -and (-not $DefaultProxy.IsBypassed($Uri))) {
+ $ProxyAddress = $DefaultProxy.GetProxy($Uri).OriginalString
+ $ProxyUseDefaultCredentials = $true
+ }
+ } catch {
+ # Eat the exception and move forward as the above code is an attempt
+ # at resolving the DefaultProxy that may not have been a problem.
+ $ProxyAddress = $null
+ Say-Verbose("Exception ignored: $_.Exception.Message - moving forward...")
+ }
+ }
+
+ if($ProxyAddress) {
+ $HttpClientHandler = New-Object System.Net.Http.HttpClientHandler
+ $HttpClientHandler.Proxy = New-Object System.Net.WebProxy -Property @{Address=$ProxyAddress;UseDefaultCredentials=$ProxyUseDefaultCredentials}
+ $HttpClient = New-Object System.Net.Http.HttpClient -ArgumentList $HttpClientHandler
+ }
+ else {
+
+ $HttpClient = New-Object System.Net.Http.HttpClient
+ }
+ # Default timeout for HttpClient is 100s. For a 50 MB download this assumes 500 KB/s average, any less will time out
+ # 20 minutes allows it to work over much slower connections.
+ $HttpClient.Timeout = New-TimeSpan -Minutes 20
+ $Response = $HttpClient.GetAsync("${Uri}${FeedCredential}").Result
+ if (($Response -eq $null) -or (-not ($Response.IsSuccessStatusCode))) {
+ # The feed credential is potentially sensitive info. Do not log FeedCredential to console output.
+ $ErrorMsg = "Failed to download $Uri."
+ if ($Response -ne $null) {
+ $ErrorMsg += " $Response"
+ }
+
+ throw $ErrorMsg
+ }
+
+ return $Response
+ }
+ finally {
+ if ($HttpClient -ne $null) {
+ $HttpClient.Dispose()
+ }
+ }
+ })
+}
+
+
+function Get-Latest-Version-Info([string]$AzureFeed, [string]$Channel, [bool]$Coherent) {
+ Say-Invocation $MyInvocation
+
+ $VersionFileUrl = $null
+ if ($Runtime -eq "dotnet") {
+ $VersionFileUrl = "$UncachedFeed/Runtime/$Channel/latest.version"
+ }
+ elseif ($Runtime -eq "aspnetcore") {
+ $VersionFileUrl = "$UncachedFeed/aspnetcore/Runtime/$Channel/latest.version"
+ }
+ # Currently, the WindowsDesktop runtime is manufactured with the .Net core runtime
+ elseif ($Runtime -eq "windowsdesktop") {
+ $VersionFileUrl = "$UncachedFeed/Runtime/$Channel/latest.version"
+ }
+ elseif (-not $Runtime) {
+ if ($Coherent) {
+ $VersionFileUrl = "$UncachedFeed/Sdk/$Channel/latest.coherent.version"
+ }
+ else {
+ $VersionFileUrl = "$UncachedFeed/Sdk/$Channel/latest.version"
+ }
+ }
+ else {
+ throw "Invalid value for `$Runtime"
+ }
+ try {
+ $Response = GetHTTPResponse -Uri $VersionFileUrl
+ }
+ catch {
+ throw "Could not resolve version information."
+ }
+ $StringContent = $Response.Content.ReadAsStringAsync().Result
+
+ switch ($Response.Content.Headers.ContentType) {
+ { ($_ -eq "application/octet-stream") } { $VersionText = $StringContent }
+ { ($_ -eq "text/plain") } { $VersionText = $StringContent }
+ { ($_ -eq "text/plain; charset=UTF-8") } { $VersionText = $StringContent }
+ default { throw "``$Response.Content.Headers.ContentType`` is an unknown .version file content type." }
+ }
+
+ $VersionInfo = Get-Version-Info-From-Version-Text $VersionText
+
+ return $VersionInfo
+}
+
+
+function Get-Specific-Version-From-Version([string]$AzureFeed, [string]$Channel, [string]$Version) {
+ Say-Invocation $MyInvocation
+
+ switch ($Version.ToLower()) {
+ { $_ -eq "latest" } {
+ $LatestVersionInfo = Get-Latest-Version-Info -AzureFeed $AzureFeed -Channel $Channel -Coherent $False
+ return $LatestVersionInfo.Version
+ }
+ { $_ -eq "coherent" } {
+ $LatestVersionInfo = Get-Latest-Version-Info -AzureFeed $AzureFeed -Channel $Channel -Coherent $True
+ return $LatestVersionInfo.Version
+ }
+ default { return $Version }
+ }
+}
+
+function Get-Download-Link([string]$AzureFeed, [string]$SpecificVersion, [string]$CLIArchitecture) {
+ Say-Invocation $MyInvocation
+
+ if ($Runtime -eq "dotnet") {
+ $PayloadURL = "$AzureFeed/Runtime/$SpecificVersion/dotnet-runtime-$SpecificVersion-win-$CLIArchitecture.zip"
+ }
+ elseif ($Runtime -eq "aspnetcore") {
+ $PayloadURL = "$AzureFeed/aspnetcore/Runtime/$SpecificVersion/aspnetcore-runtime-$SpecificVersion-win-$CLIArchitecture.zip"
+ }
+ elseif ($Runtime -eq "windowsdesktop") {
+ $PayloadURL = "$AzureFeed/Runtime/$SpecificVersion/windowsdesktop-runtime-$SpecificVersion-win-$CLIArchitecture.zip"
+ }
+ elseif (-not $Runtime) {
+ $PayloadURL = "$AzureFeed/Sdk/$SpecificVersion/dotnet-sdk-$SpecificVersion-win-$CLIArchitecture.zip"
+ }
+ else {
+ throw "Invalid value for `$Runtime"
+ }
+
+ Say-Verbose "Constructed primary named payload URL: $PayloadURL"
+
+ return $PayloadURL
+}
+
+function Get-LegacyDownload-Link([string]$AzureFeed, [string]$SpecificVersion, [string]$CLIArchitecture) {
+ Say-Invocation $MyInvocation
+
+ if (-not $Runtime) {
+ $PayloadURL = "$AzureFeed/Sdk/$SpecificVersion/dotnet-dev-win-$CLIArchitecture.$SpecificVersion.zip"
+ }
+ elseif ($Runtime -eq "dotnet") {
+ $PayloadURL = "$AzureFeed/Runtime/$SpecificVersion/dotnet-win-$CLIArchitecture.$SpecificVersion.zip"
+ }
+ else {
+ return $null
+ }
+
+ Say-Verbose "Constructed legacy named payload URL: $PayloadURL"
+
+ return $PayloadURL
+}
+
+function Get-User-Share-Path() {
+ Say-Invocation $MyInvocation
+
+ $InstallRoot = $env:DOTNET_INSTALL_DIR
+ if (!$InstallRoot) {
+ $InstallRoot = "$env:LocalAppData\Microsoft\dotnet"
+ }
+ return $InstallRoot
+}
+
+function Resolve-Installation-Path([string]$InstallDir) {
+ Say-Invocation $MyInvocation
+
+ if ($InstallDir -eq "") {
+ return Get-User-Share-Path
+ }
+ return $InstallDir
+}
+
+function Get-Version-Info-From-Version-File([string]$InstallRoot, [string]$RelativePathToVersionFile) {
+ Say-Invocation $MyInvocation
+
+ $VersionFile = Join-Path -Path $InstallRoot -ChildPath $RelativePathToVersionFile
+ Say-Verbose "Local version file: $VersionFile"
+
+ if (Test-Path $VersionFile) {
+ $VersionText = cat $VersionFile
+ Say-Verbose "Local version file text: $VersionText"
+ return Get-Version-Info-From-Version-Text $VersionText
+ }
+
+ Say-Verbose "Local version file not found."
+
+ return $null
+}
+
+function Is-Dotnet-Package-Installed([string]$InstallRoot, [string]$RelativePathToPackage, [string]$SpecificVersion) {
+ Say-Invocation $MyInvocation
+
+ $DotnetPackagePath = Join-Path -Path $InstallRoot -ChildPath $RelativePathToPackage | Join-Path -ChildPath $SpecificVersion
+ Say-Verbose "Is-Dotnet-Package-Installed: Path to a package: $DotnetPackagePath"
+ return Test-Path $DotnetPackagePath -PathType Container
+}
+
+function Get-Absolute-Path([string]$RelativeOrAbsolutePath) {
+ # Too much spam
+ # Say-Invocation $MyInvocation
+
+ return $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($RelativeOrAbsolutePath)
+}
+
+function Get-Path-Prefix-With-Version($path) {
+ $match = [regex]::match($path, $VersionRegEx)
+ if ($match.Success) {
+        return $path.Substring(0, $match.Index + $match.Length)
+ }
+
+ return $null
+}
+
+function Get-List-Of-Directories-And-Versions-To-Unpack-From-Dotnet-Package([System.IO.Compression.ZipArchive]$Zip, [string]$OutPath) {
+ Say-Invocation $MyInvocation
+
+ $ret = @()
+ foreach ($entry in $Zip.Entries) {
+ $dir = Get-Path-Prefix-With-Version $entry.FullName
+ if ($dir -ne $null) {
+ $path = Get-Absolute-Path $(Join-Path -Path $OutPath -ChildPath $dir)
+ if (-Not (Test-Path $path -PathType Container)) {
+ $ret += $dir
+ }
+ }
+ }
+
+ $ret = $ret | Sort-Object | Get-Unique
+
+ $values = ($ret | foreach { "$_" }) -join ";"
+ Say-Verbose "Directories to unpack: $values"
+
+ return $ret
+}
+
+# Example zip content and extraction algorithm:
+# Rule: files if extracted are always being extracted to the same relative path locally
+# .\
+# a.exe # file does not exist locally, extract
+# b.dll # file exists locally, override only if $OverrideFiles set
+# aaa\ # same rules as for files
+# ...
+# abc\1.0.0\ # directory contains version and exists locally
+# ... # do not extract content under versioned part
+# abc\asd\ # same rules as for files
+# ...
+# def\ghi\1.0.1\ # directory contains version and does not exist locally
+# ... # extract content
+function Extract-Dotnet-Package([string]$ZipPath, [string]$OutPath) {
+ Say-Invocation $MyInvocation
+
+ Load-Assembly -Assembly System.IO.Compression.FileSystem
+ Set-Variable -Name Zip
+ try {
+ $Zip = [System.IO.Compression.ZipFile]::OpenRead($ZipPath)
+
+ $DirectoriesToUnpack = Get-List-Of-Directories-And-Versions-To-Unpack-From-Dotnet-Package -Zip $Zip -OutPath $OutPath
+
+ foreach ($entry in $Zip.Entries) {
+ $PathWithVersion = Get-Path-Prefix-With-Version $entry.FullName
+ if (($PathWithVersion -eq $null) -Or ($DirectoriesToUnpack -contains $PathWithVersion)) {
+ $DestinationPath = Get-Absolute-Path $(Join-Path -Path $OutPath -ChildPath $entry.FullName)
+ $DestinationDir = Split-Path -Parent $DestinationPath
+ $OverrideFiles=$OverrideNonVersionedFiles -Or (-Not (Test-Path $DestinationPath))
+ if ((-Not $DestinationPath.EndsWith("\")) -And $OverrideFiles) {
+ New-Item -ItemType Directory -Force -Path $DestinationDir | Out-Null
+ [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $DestinationPath, $OverrideNonVersionedFiles)
+ }
+ }
+ }
+ }
+ finally {
+ if ($Zip -ne $null) {
+ $Zip.Dispose()
+ }
+ }
+}
+
+function DownloadFile($Source, [string]$OutPath) {
+ if ($Source -notlike "http*") {
+ # Using System.IO.Path.GetFullPath to get the current directory
+ # does not work in this context - $pwd gives the current directory
+ if (![System.IO.Path]::IsPathRooted($Source)) {
+ $Source = $(Join-Path -Path $pwd -ChildPath $Source)
+ }
+ $Source = Get-Absolute-Path $Source
+ Say "Copying file from $Source to $OutPath"
+ Copy-Item $Source $OutPath
+ return
+ }
+
+ $Stream = $null
+
+ try {
+ $Response = GetHTTPResponse -Uri $Source
+ $Stream = $Response.Content.ReadAsStreamAsync().Result
+ $File = [System.IO.File]::Create($OutPath)
+ $Stream.CopyTo($File)
+ $File.Close()
+ }
+ finally {
+ if ($Stream -ne $null) {
+ $Stream.Dispose()
+ }
+ }
+}
+
+function Prepend-Sdk-InstallRoot-To-Path([string]$InstallRoot, [string]$BinFolderRelativePath) {
+ $BinPath = Get-Absolute-Path $(Join-Path -Path $InstallRoot -ChildPath $BinFolderRelativePath)
+ if (-Not $NoPath) {
+ $SuffixedBinPath = "$BinPath;"
+ if (-Not $env:path.Contains($SuffixedBinPath)) {
+ Say "Adding to current process PATH: `"$BinPath`". Note: This change will not be visible if PowerShell was run as a child process."
+ $env:path = $SuffixedBinPath + $env:path
+ } else {
+ Say-Verbose "Current process PATH already contains `"$BinPath`""
+ }
+ }
+ else {
+ Say "Binaries of dotnet can be found in $BinPath"
+ }
+}
+
+$CLIArchitecture = Get-CLIArchitecture-From-Architecture $Architecture
+$SpecificVersion = Get-Specific-Version-From-Version -AzureFeed $AzureFeed -Channel $Channel -Version $Version
+$DownloadLink = Get-Download-Link -AzureFeed $AzureFeed -SpecificVersion $SpecificVersion -CLIArchitecture $CLIArchitecture
+$LegacyDownloadLink = Get-LegacyDownload-Link -AzureFeed $AzureFeed -SpecificVersion $SpecificVersion -CLIArchitecture $CLIArchitecture
+
+$InstallRoot = Resolve-Installation-Path $InstallDir
+Say-Verbose "InstallRoot: $InstallRoot"
+$ScriptName = $MyInvocation.MyCommand.Name
+
+if ($DryRun) {
+ Say "Payload URLs:"
+ Say "Primary named payload URL: $DownloadLink"
+ if ($LegacyDownloadLink) {
+ Say "Legacy named payload URL: $LegacyDownloadLink"
+ }
+ $RepeatableCommand = ".\$ScriptName -Version `"$SpecificVersion`" -InstallDir `"$InstallRoot`" -Architecture `"$CLIArchitecture`""
+ if ($Runtime -eq "dotnet") {
+ $RepeatableCommand+=" -Runtime `"dotnet`""
+ }
+ elseif ($Runtime -eq "aspnetcore") {
+ $RepeatableCommand+=" -Runtime `"aspnetcore`""
+ }
+ foreach ($key in $MyInvocation.BoundParameters.Keys) {
+ if (-not (@("Architecture","Channel","DryRun","InstallDir","Runtime","SharedRuntime","Version") -contains $key)) {
+ $RepeatableCommand+=" -$key `"$($MyInvocation.BoundParameters[$key])`""
+ }
+ }
+ Say "Repeatable invocation: $RepeatableCommand"
+ exit 0
+}
+
+if ($Runtime -eq "dotnet") {
+ $assetName = ".NET Core Runtime"
+ $dotnetPackageRelativePath = "shared\Microsoft.NETCore.App"
+}
+elseif ($Runtime -eq "aspnetcore") {
+ $assetName = "ASP.NET Core Runtime"
+ $dotnetPackageRelativePath = "shared\Microsoft.AspNetCore.App"
+}
+elseif ($Runtime -eq "windowsdesktop") {
+ $assetName = ".NET Core Windows Desktop Runtime"
+ $dotnetPackageRelativePath = "shared\Microsoft.WindowsDesktop.App"
+}
+elseif (-not $Runtime) {
+ $assetName = ".NET Core SDK"
+ $dotnetPackageRelativePath = "sdk"
+}
+else {
+ throw "Invalid value for `$Runtime"
+}
+
+# Check if the SDK version is already installed.
+$isAssetInstalled = Is-Dotnet-Package-Installed -InstallRoot $InstallRoot -RelativePathToPackage $dotnetPackageRelativePath -SpecificVersion $SpecificVersion
+if ($isAssetInstalled) {
+ Say "$assetName version $SpecificVersion is already installed."
+ Prepend-Sdk-InstallRoot-To-Path -InstallRoot $InstallRoot -BinFolderRelativePath $BinFolderRelativePath
+ exit 0
+}
+
+New-Item -ItemType Directory -Force -Path $InstallRoot | Out-Null
+
+$installDrive = $((Get-Item $InstallRoot).PSDrive.Name);
+$diskInfo = Get-PSDrive -Name $installDrive
+if ($diskInfo.Free / 1MB -le 100) {
+ Say "There is not enough disk space on drive ${installDrive}:"
+ exit 0
+}
+
+$ZipPath = [System.IO.Path]::combine([System.IO.Path]::GetTempPath(), [System.IO.Path]::GetRandomFileName())
+Say-Verbose "Zip path: $ZipPath"
+
+$DownloadFailed = $false
+Say "Downloading link: $DownloadLink"
+try {
+ DownloadFile -Source $DownloadLink -OutPath $ZipPath
+}
+catch {
+ Say "Cannot download: $DownloadLink"
+ if ($LegacyDownloadLink) {
+ $DownloadLink = $LegacyDownloadLink
+ $ZipPath = [System.IO.Path]::combine([System.IO.Path]::GetTempPath(), [System.IO.Path]::GetRandomFileName())
+ Say-Verbose "Legacy zip path: $ZipPath"
+ Say "Downloading legacy link: $DownloadLink"
+ try {
+ DownloadFile -Source $DownloadLink -OutPath $ZipPath
+ }
+ catch {
+ Say "Cannot download: $DownloadLink"
+ $DownloadFailed = $true
+ }
+ }
+ else {
+ $DownloadFailed = $true
+ }
+}
+
+if ($DownloadFailed) {
+ throw "Could not find/download: `"$assetName`" with version = $SpecificVersion`nRefer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support"
+}
+
+Say "Extracting zip from $DownloadLink"
+Extract-Dotnet-Package -ZipPath $ZipPath -OutPath $InstallRoot
+
+# Check if the SDK version is now installed; if not, fail the installation.
+$isAssetInstalled = Is-Dotnet-Package-Installed -InstallRoot $InstallRoot -RelativePathToPackage $dotnetPackageRelativePath -SpecificVersion $SpecificVersion
+if (!$isAssetInstalled) {
+ throw "`"$assetName`" with version = $SpecificVersion failed to install with an unknown error."
+}
+
+Remove-Item $ZipPath
+
+Prepend-Sdk-InstallRoot-To-Path -InstallRoot $InstallRoot -BinFolderRelativePath $BinFolderRelativePath
+
+Say "Installation finished"
+exit 0
diff --git a/src/Misc/dotnet-install.sh b/src/Misc/dotnet-install.sh
new file mode 100755
index 00000000000..61337044fdb
--- /dev/null
+++ b/src/Misc/dotnet-install.sh
@@ -0,0 +1,1025 @@
+#!/usr/bin/env bash
+# Copyright (c) .NET Foundation and contributors. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for full license information.
+#
+
+# Stop script on NZEC
+set -e
+# Stop script if unbound variable found (use ${var:-} if intentional)
+set -u
+# By default cmd1 | cmd2 returns exit code of cmd2 regardless of cmd1 success
+# This is causing it to fail
+set -o pipefail
+
+# Use in the the functions: eval $invocation
+invocation='say_verbose "Calling: ${yellow:-}${FUNCNAME[0]} ${green:-}$*${normal:-}"'
+
+# standard output may be used as a return value in the functions
+# we need a way to write text on the screen in the functions so that
+# it won't interfere with the return value.
+# Exposing stream 3 as a pipe to standard output of the script itself
+exec 3>&1
+
+# Setup some colors to use. These need to work in fairly limited shells, like the Ubuntu Docker container where there are only 8 colors.
+# See if stdout is a terminal
+if [ -t 1 ] && command -v tput > /dev/null; then
+ # see if it supports colors
+ ncolors=$(tput colors)
+ if [ -n "$ncolors" ] && [ $ncolors -ge 8 ]; then
+ bold="$(tput bold || echo)"
+ normal="$(tput sgr0 || echo)"
+ black="$(tput setaf 0 || echo)"
+ red="$(tput setaf 1 || echo)"
+ green="$(tput setaf 2 || echo)"
+ yellow="$(tput setaf 3 || echo)"
+ blue="$(tput setaf 4 || echo)"
+ magenta="$(tput setaf 5 || echo)"
+ cyan="$(tput setaf 6 || echo)"
+ white="$(tput setaf 7 || echo)"
+ fi
+fi
+
+say_warning() {
+ printf "%b\n" "${yellow:-}dotnet_install: Warning: $1${normal:-}"
+}
+
+say_err() {
+ printf "%b\n" "${red:-}dotnet_install: Error: $1${normal:-}" >&2
+}
+
+say() {
+ # using stream 3 (defined in the beginning) to not interfere with stdout of functions
+ # which may be used as return value
+ printf "%b\n" "${cyan:-}dotnet-install:${normal:-} $1" >&3
+}
+
+say_verbose() {
+ if [ "$verbose" = true ]; then
+ say "$1"
+ fi
+}
+
+# This platform list is finite - if the SDK/Runtime has supported Linux distribution-specific assets,
+# then and only then should the Linux distribution appear in this list.
+# Adding a Linux distribution to this list does not imply distribution-specific support.
+get_legacy_os_name_from_platform() {
+ eval $invocation
+
+ platform="$1"
+ case "$platform" in
+ "centos.7")
+ echo "centos"
+ return 0
+ ;;
+ "debian.8")
+ echo "debian"
+ return 0
+ ;;
+ "debian.9")
+ echo "debian.9"
+ return 0
+ ;;
+ "fedora.23")
+ echo "fedora.23"
+ return 0
+ ;;
+ "fedora.24")
+ echo "fedora.24"
+ return 0
+ ;;
+ "fedora.27")
+ echo "fedora.27"
+ return 0
+ ;;
+ "fedora.28")
+ echo "fedora.28"
+ return 0
+ ;;
+ "opensuse.13.2")
+ echo "opensuse.13.2"
+ return 0
+ ;;
+ "opensuse.42.1")
+ echo "opensuse.42.1"
+ return 0
+ ;;
+ "opensuse.42.3")
+ echo "opensuse.42.3"
+ return 0
+ ;;
+ "rhel.7"*)
+ echo "rhel"
+ return 0
+ ;;
+ "ubuntu.14.04")
+ echo "ubuntu"
+ return 0
+ ;;
+ "ubuntu.16.04")
+ echo "ubuntu.16.04"
+ return 0
+ ;;
+ "ubuntu.16.10")
+ echo "ubuntu.16.10"
+ return 0
+ ;;
+ "ubuntu.18.04")
+ echo "ubuntu.18.04"
+ return 0
+ ;;
+ "alpine.3.4.3")
+ echo "alpine"
+ return 0
+ ;;
+ esac
+ return 1
+}
+
+get_linux_platform_name() {
+ eval $invocation
+
+ if [ -n "$runtime_id" ]; then
+ echo "${runtime_id%-*}"
+ return 0
+ else
+ if [ -e /etc/os-release ]; then
+ . /etc/os-release
+ echo "$ID.$VERSION_ID"
+ return 0
+ elif [ -e /etc/redhat-release ]; then
+            local redhatRelease=$(</etc/redhat-release)
+            if [[ $redhatRelease == "CentOS release 6."* || $redhatRelease == "Red Hat Enterprise Linux Server release 6."* ]]; then
+                echo "rhel.6"
+                return 0
+            fi
+        fi
+    fi
+
+    say_verbose "Linux specific platform name and version could not be detected: UName = $uname"
+    return 1
+}
+
+get_current_os_name() {
+    eval $invocation
+
+    local uname=$(uname)
+    if [ "$uname" = "Darwin" ]; then
+        echo "osx"
+        return 0
+    elif [ "$uname" = "FreeBSD" ]; then
+        echo "freebsd"
+        return 0
+    elif [ "$uname" = "Linux" ]; then
+        local linux_platform_name
+        linux_platform_name="$(get_linux_platform_name)" || { echo "linux" && return 0 ; }
+
+        if [[ $linux_platform_name == "rhel.6" ]]; then
+            echo $linux_platform_name
+            return 0
+        elif [[ $linux_platform_name == alpine* ]]; then
+            echo "linux-musl"
+            return 0
+        else
+            echo "linux"
+            return 0
+        fi
+    fi
+
+    say_err "OS name could not be detected: UName = $uname"
+    return 1
+}
+
+get_legacy_os_name() {
+    eval $invocation
+
+    local uname=$(uname)
+    if [ "$uname" = "Darwin" ]; then
+        echo "osx"
+        return 0
+    elif [ -n "$runtime_id" ]; then
+        echo $(get_legacy_os_name_from_platform "${runtime_id%-*}")
+        return 0
+    else
+        if [ -e /etc/os-release ]; then
+            . /etc/os-release
+            os=$(get_legacy_os_name_from_platform "$ID.$VERSION_ID")
+            if [ -n "$os" ]; then
+                echo "$os"
+                return 0
+            fi
+        fi
+    fi
+
+    say_verbose "Distribution specific OS name and version could not be detected: UName = $uname"
+    return 1
+}
+
+machine_has() {
+    eval $invocation
+
+    hash "$1" > /dev/null 2>&1
+    return $?
+}
+
+
+check_min_reqs() {
+ local hasMinimum=false
+ if machine_has "curl"; then
+ hasMinimum=true
+ elif machine_has "wget"; then
+ hasMinimum=true
+ fi
+
+ if [ "$hasMinimum" = "false" ]; then
+ say_err "curl (recommended) or wget are required to download dotnet. Install missing prerequisite to proceed."
+ return 1
+ fi
+ return 0
+}
+
+check_pre_reqs() {
+ eval $invocation
+
+ if [ "${DOTNET_INSTALL_SKIP_PREREQS:-}" = "1" ]; then
+ return 0
+ fi
+
+ if [ "$(uname)" = "Linux" ]; then
+ if [ ! -x "$(command -v ldconfig)" ]; then
+ echo "ldconfig is not in PATH, trying /sbin/ldconfig."
+ LDCONFIG_COMMAND="/sbin/ldconfig"
+ else
+ LDCONFIG_COMMAND="ldconfig"
+ fi
+
+ local librarypath=${LD_LIBRARY_PATH:-}
+ LDCONFIG_COMMAND="$LDCONFIG_COMMAND -NXv ${librarypath//:/ }"
+
+ [ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep libunwind)" ] && say_warning "Unable to locate libunwind. Probable prerequisite missing; install libunwind."
+ [ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep libssl)" ] && say_warning "Unable to locate libssl. Probable prerequisite missing; install libssl."
+ [ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep libicu)" ] && say_warning "Unable to locate libicu. Probable prerequisite missing; install libicu."
+ [ -z "$($LDCONFIG_COMMAND 2>/dev/null | grep -F libcurl.so)" ] && say_warning "Unable to locate libcurl. Probable prerequisite missing; install libcurl."
+ fi
+
+ return 0
+}
+
+# args:
+# input - $1
+to_lowercase() {
+ #eval $invocation
+
+ echo "$1" | tr '[:upper:]' '[:lower:]'
+ return 0
+}
+
+# args:
+# input - $1
+remove_trailing_slash() {
+ #eval $invocation
+
+ local input="${1:-}"
+ echo "${input%/}"
+ return 0
+}
+
+# args:
+# input - $1
+remove_beginning_slash() {
+ #eval $invocation
+
+ local input="${1:-}"
+ echo "${input#/}"
+ return 0
+}
+
+# args:
+# root_path - $1
+# child_path - $2 - this parameter can be empty
+combine_paths() {
+ eval $invocation
+
+ # TODO: Consider making it work with any number of paths. For now:
+ if [ ! -z "${3:-}" ]; then
+ say_err "combine_paths: Function takes two parameters."
+ return 1
+ fi
+
+ local root_path="$(remove_trailing_slash "$1")"
+ local child_path="$(remove_beginning_slash "${2:-}")"
+ say_verbose "combine_paths: root_path=$root_path"
+ say_verbose "combine_paths: child_path=$child_path"
+ echo "$root_path/$child_path"
+ return 0
+}
+
+get_machine_architecture() {
+ eval $invocation
+
+ if command -v uname > /dev/null; then
+ CPUName=$(uname -m)
+ case $CPUName in
+ armv7l)
+ echo "arm"
+ return 0
+ ;;
+ aarch64)
+ echo "arm64"
+ return 0
+ ;;
+ esac
+ fi
+
+ # Always default to 'x64'
+ echo "x64"
+ return 0
+}
+
+# args:
+# architecture - $1
+get_normalized_architecture_from_architecture() {
+ eval $invocation
+
+ local architecture="$(to_lowercase "$1")"
+ case "$architecture" in
+        ""|\<auto\>)
+ echo "$(get_normalized_architecture_from_architecture "$(get_machine_architecture)")"
+ return 0
+ ;;
+ amd64|x64)
+ echo "x64"
+ return 0
+ ;;
+ arm)
+ echo "arm"
+ return 0
+ ;;
+ arm64)
+ echo "arm64"
+ return 0
+ ;;
+ esac
+
+ say_err "Architecture \`$architecture\` not supported. If you think this is a bug, report it at https://github.com/dotnet/cli/issues"
+ return 1
+}
+
+# The version text returned from the feeds is a 1-line or 2-line string:
+# For the SDK and the dotnet runtime (2 lines):
+# Line 1: # commit_hash
+# Line 2: # 4-part version
+# For the aspnetcore runtime (1 line):
+# Line 1: # 4-part version
+
+# args:
+# version_text - stdin
+get_version_from_version_info() {
+ eval $invocation
+
+ cat | tail -n 1 | sed 's/\r$//'
+ return 0
+}
+
+# args:
+# install_root - $1
+# relative_path_to_package - $2
+# specific_version - $3
+is_dotnet_package_installed() {
+ eval $invocation
+
+ local install_root="$1"
+ local relative_path_to_package="$2"
+ local specific_version="${3//[$'\t\r\n']}"
+
+ local dotnet_package_path="$(combine_paths "$(combine_paths "$install_root" "$relative_path_to_package")" "$specific_version")"
+ say_verbose "is_dotnet_package_installed: dotnet_package_path=$dotnet_package_path"
+
+ if [ -d "$dotnet_package_path" ]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+# args:
+# azure_feed - $1
+# channel - $2
+# normalized_architecture - $3
+# coherent - $4
+get_latest_version_info() {
+ eval $invocation
+
+ local azure_feed="$1"
+ local channel="$2"
+ local normalized_architecture="$3"
+ local coherent="$4"
+
+ local version_file_url=null
+ if [[ "$runtime" == "dotnet" ]]; then
+ version_file_url="$uncached_feed/Runtime/$channel/latest.version"
+ elif [[ "$runtime" == "aspnetcore" ]]; then
+ version_file_url="$uncached_feed/aspnetcore/Runtime/$channel/latest.version"
+ elif [ -z "$runtime" ]; then
+ if [ "$coherent" = true ]; then
+ version_file_url="$uncached_feed/Sdk/$channel/latest.coherent.version"
+ else
+ version_file_url="$uncached_feed/Sdk/$channel/latest.version"
+ fi
+ else
+ say_err "Invalid value for \$runtime"
+ return 1
+ fi
+ say_verbose "get_latest_version_info: latest url: $version_file_url"
+
+ download "$version_file_url"
+ return $?
+}
+
+# Map the user-supplied version ("latest", "coherent", or an explicit version
+# string) to a specific version number, printed on stdout.
+# args:
+# azure_feed - $1
+# channel - $2
+# normalized_architecture - $3
+# version - $4
+get_specific_version_from_version() {
+ eval $invocation
+
+ local azure_feed="$1"
+ local channel="$2"
+ local normalized_architecture="$3"
+ local version="$(to_lowercase "$4")"
+
+ case "$version" in
+ latest)
+ local version_info
+ version_info="$(get_latest_version_info "$azure_feed" "$channel" "$normalized_architecture" false)" || return 1
+ say_verbose "get_specific_version_from_version: version_info=$version_info"
+ echo "$version_info" | get_version_from_version_info
+ return 0
+ ;;
+ coherent)
+ # Same as "latest" but asks for the latest *coherent* SDK build.
+ local version_info
+ version_info="$(get_latest_version_info "$azure_feed" "$channel" "$normalized_architecture" true)" || return 1
+ say_verbose "get_specific_version_from_version: version_info=$version_info"
+ echo "$version_info" | get_version_from_version_info
+ return 0
+ ;;
+ *)
+ # Anything else is assumed to already be a specific version number.
+ echo "$version"
+ return 0
+ ;;
+ esac
+}
+
+# Build the primary (current naming scheme) tar.gz download URL for the
+# requested product/version/OS/architecture; printed on stdout.
+# args:
+# azure_feed - $1
+# channel - $2 (unused here; kept for a uniform call signature)
+# normalized_architecture - $3
+# specific_version - $4
+construct_download_link() {
+ eval $invocation
+
+ local azure_feed="$1"
+ local channel="$2"
+ local normalized_architecture="$3"
+ # Strip stray tabs/CR/LF that can leak in from the downloaded version file.
+ local specific_version="${4//[$'\t\r\n']}"
+
+ local osname
+ osname="$(get_current_os_name)" || return 1
+
+ local download_link=null
+ if [[ "$runtime" == "dotnet" ]]; then
+ download_link="$azure_feed/Runtime/$specific_version/dotnet-runtime-$specific_version-$osname-$normalized_architecture.tar.gz"
+ elif [[ "$runtime" == "aspnetcore" ]]; then
+ download_link="$azure_feed/aspnetcore/Runtime/$specific_version/aspnetcore-runtime-$specific_version-$osname-$normalized_architecture.tar.gz"
+ elif [ -z "$runtime" ]; then
+ download_link="$azure_feed/Sdk/$specific_version/dotnet-sdk-$specific_version-$osname-$normalized_architecture.tar.gz"
+ else
+ return 1
+ fi
+
+ echo "$download_link"
+ return 0
+}
+
+# Build the legacy (pre-rename) download URL used by older releases; printed
+# on stdout. Returns 1 for products that never had a legacy payload name
+# (e.g. aspnetcore), which callers use to skip the legacy fallback.
+# args:
+# azure_feed - $1
+# channel - $2 (unused here; kept for a uniform call signature)
+# normalized_architecture - $3
+# specific_version - $4
+construct_legacy_download_link() {
+ eval $invocation
+
+ local azure_feed="$1"
+ local channel="$2"
+ local normalized_architecture="$3"
+ # Strip stray tabs/CR/LF that can leak in from the downloaded version file.
+ local specific_version="${4//[$'\t\r\n']}"
+
+ local distro_specific_osname
+ distro_specific_osname="$(get_legacy_os_name)" || return 1
+
+ local legacy_download_link=null
+ if [[ "$runtime" == "dotnet" ]]; then
+ legacy_download_link="$azure_feed/Runtime/$specific_version/dotnet-$distro_specific_osname-$normalized_architecture.$specific_version.tar.gz"
+ elif [ -z "$runtime" ]; then
+ legacy_download_link="$azure_feed/Sdk/$specific_version/dotnet-dev-$distro_specific_osname-$normalized_architecture.$specific_version.tar.gz"
+ else
+ return 1
+ fi
+
+ echo "$legacy_download_link"
+ return 0
+}
+
+# Print the default per-user install location: $DOTNET_INSTALL_DIR when set,
+# otherwise $HOME/.dotnet.
+get_user_install_path() {
+ eval $invocation
+
+ local override="${DOTNET_INSTALL_DIR:-}"
+ if [ -n "$override" ]; then
+ echo "$override"
+ else
+ echo "$HOME/.dotnet"
+ fi
+ return 0
+}
+
+# args:
+# install_dir - $1
+resolve_installation_path() {
+ eval $invocation
+
+ local install_dir=$1
+ if [ "$install_dir" = "" ]; then
+ local user_install_path="$(get_user_install_path)"
+ say_verbose "resolve_installation_path: user_install_path=$user_install_path"
+ echo "$user_install_path"
+ return 0
+ fi
+
+ echo "$install_dir"
+ return 0
+}
+
+# args:
+# install_root - $1
+get_installed_version_info() {
+ eval $invocation
+
+ local install_root="$1"
+ local version_file="$(combine_paths "$install_root" "$local_version_file_relative_path")"
+ say_verbose "Local version file: $version_file"
+ if [ ! -z "$version_file" ] | [ -r "$version_file" ]; then
+ local version_info="$(cat "$version_file")"
+ echo "$version_info"
+ return 0
+ fi
+
+ say_verbose "Local version file not found."
+ return 0
+}
+
+# args:
+# relative_or_absolute_path - $1
+get_absolute_path() {
+ eval $invocation
+
+ local relative_or_absolute_path=$1
+ echo "$(cd "$(dirname "$1")" && pwd -P)/$(basename "$1")"
+ return 0
+}
+
+# Copy the files listed on stdin (paths under root_path) into out_path,
+# preserving their relative directory structure.
+# args:
+# input_files - stdin (one path per line, pre-sorted by the caller)
+# root_path - $1
+# out_path - $2
+# override - $3 (true => always overwrite; false => keep existing targets)
+copy_files_or_dirs_from_list() {
+ eval $invocation
+
+ local root_path="$(remove_trailing_slash "$1")"
+ local out_path="$(remove_trailing_slash "$2")"
+ local override="$3"
+ local osname="$(get_current_os_name)"
+ # cp on musl-based systems (Alpine) lacks -n (no-clobber); use -u (update
+ # only when newer) there as the closest substitute.
+ local override_switch=$(
+ if [ "$override" = false ]; then
+ if [[ "$osname" == "linux-musl" ]]; then
+ printf -- "-u";
+ else
+ printf -- "-n";
+ fi
+ fi)
+
+ cat | uniq | while read -r file_path; do
+ # Path of the entry relative to root_path.
+ local path="$(remove_beginning_slash "${file_path#$root_path}")"
+ local target="$out_path/$path"
+ if [ "$override" = true ] || (! ([ -d "$target" ] || [ -e "$target" ])); then
+ mkdir -p "$out_path/$(dirname "$path")"
+ # Replace an existing directory wholesale rather than merging into it.
+ if [ -d "$target" ]; then
+ rm -rf "$target"
+ fi
+ cp -R $override_switch "$root_path/$path" "$target"
+ fi
+ done
+}
+
+# Extract a downloaded .tar.gz payload into the install root via a temp dir.
+# Version-numbered folders (e.g. sdk/2.1.300/) are always copied; everything
+# else honors the global $override_non_versioned_files setting.
+# args:
+# zip_path - $1
+# out_path - $2
+extract_dotnet_package() {
+ eval $invocation
+
+ local zip_path="$1"
+ local out_path="$2"
+
+ local temp_out_path="$(mktemp -d "$temporary_file_template")"
+
+ local failed=false
+ tar -xzf "$zip_path" -C "$temp_out_path" > /dev/null || failed=true
+
+ # Matches any path containing a version-like directory component (A.B...).
+ local folders_with_version_regex='^.*/[0-9]+\.[0-9]+[^/]+/'
+ find "$temp_out_path" -type f | grep -Eo "$folders_with_version_regex" | sort | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" false
+ find "$temp_out_path" -type f | grep -Ev "$folders_with_version_regex" | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" "$override_non_versioned_files"
+
+ rm -rf "$temp_out_path"
+
+ # Extraction failure is reported only after the temp dir is cleaned up.
+ if [ "$failed" = true ]; then
+ say_err "Extraction failed"
+ return 1
+ fi
+}
+
+# Fetch remote_path to out_path (or to stdout when out_path is omitted).
+# Non-http(s) paths are treated as local files and simply copied.
+# Prefers curl, falls back to wget; fails if neither is available.
+# args:
+# remote_path - $1
+# [out_path] - $2 - stdout if not provided
+download() {
+ eval $invocation
+
+ local remote_path="$1"
+ local out_path="${2:-}"
+
+ if [[ "$remote_path" != "http"* ]]; then
+ # Local source: plain copy (note: with no out_path this copies to "").
+ cp "$remote_path" "$out_path"
+ return $?
+ fi
+
+ local failed=false
+ if machine_has "curl"; then
+ downloadcurl "$remote_path" "$out_path" || failed=true
+ elif machine_has "wget"; then
+ downloadwget "$remote_path" "$out_path" || failed=true
+ else
+ failed=true
+ fi
+ if [ "$failed" = true ]; then
+ say_verbose "Download failed: $remote_path"
+ return 1
+ fi
+ return 0
+}
+
+# Download with curl; writes to $2, or to stdout when $2 is empty.
+# Retries up to 10 times; -f makes HTTP errors fail the command.
+downloadcurl() {
+ eval $invocation
+ local remote_path="$1"
+ local out_path="${2:-}"
+
+ # Append feed_credential as late as possible before calling curl to avoid logging feed_credential
+ remote_path="${remote_path}${feed_credential}"
+
+ local failed=false
+ if [ -z "$out_path" ]; then
+ curl --retry 10 -sSL -f --create-dirs "$remote_path" || failed=true
+ else
+ curl --retry 10 -sSL -f --create-dirs -o "$out_path" "$remote_path" || failed=true
+ fi
+ if [ "$failed" = true ]; then
+ say_verbose "Curl download failed"
+ return 1
+ fi
+ return 0
+}
+
+# Download with wget; writes to $2, or to stdout (-O -) when $2 is empty.
+# Retries up to 10 times. Quiet (-q) only in the stdout case so the
+# version-info capture is not polluted by progress output.
+downloadwget() {
+ eval $invocation
+ local remote_path="$1"
+ local out_path="${2:-}"
+
+ # Append feed_credential as late as possible before calling wget to avoid logging feed_credential
+ remote_path="${remote_path}${feed_credential}"
+
+ local failed=false
+ if [ -z "$out_path" ]; then
+ wget -q --tries 10 -O - "$remote_path" || failed=true
+ else
+ wget --tries 10 -O "$out_path" "$remote_path" || failed=true
+ fi
+ if [ "$failed" = true ]; then
+ say_verbose "Wget download failed"
+ return 1
+ fi
+ return 0
+}
+
+calculate_vars() {
+ eval $invocation
+ valid_legacy_download_link=true
+
+ normalized_architecture="$(get_normalized_architecture_from_architecture "$architecture")"
+ say_verbose "normalized_architecture=$normalized_architecture"
+
+ specific_version="$(get_specific_version_from_version "$azure_feed" "$channel" "$normalized_architecture" "$version")"
+ say_verbose "specific_version=$specific_version"
+ if [ -z "$specific_version" ]; then
+ say_err "Could not resolve version information."
+ return 1
+ fi
+
+ download_link="$(construct_download_link "$azure_feed" "$channel" "$normalized_architecture" "$specific_version")"
+ say_verbose "Constructed primary named payload URL: $download_link"
+
+ legacy_download_link="$(construct_legacy_download_link "$azure_feed" "$channel" "$normalized_architecture" "$specific_version")" || valid_legacy_download_link=false
+
+ if [ "$valid_legacy_download_link" = true ]; then
+ say_verbose "Constructed legacy named payload URL: $legacy_download_link"
+ else
+ say_verbose "Cound not construct a legacy_download_link; omitting..."
+ fi
+
+ install_root="$(resolve_installation_path "$install_dir")"
+ say_verbose "InstallRoot: $install_root"
+}
+
+# Download and install the selected asset (SDK or a shared runtime) into
+# $install_root, trying the primary URL first and falling back to the legacy
+# URL when one exists. Relies on globals set by calculate_vars; note that
+# zip_path and download_link are (re)assigned here as globals, not locals.
+install_dotnet() {
+ eval $invocation
+ local download_failed=false
+ local asset_name=''
+ local asset_relative_path=''
+
+ # Map $runtime to the on-disk folder the installed version will live under.
+ if [[ "$runtime" == "dotnet" ]]; then
+ asset_relative_path="shared/Microsoft.NETCore.App"
+ asset_name=".NET Core Runtime"
+ elif [[ "$runtime" == "aspnetcore" ]]; then
+ asset_relative_path="shared/Microsoft.AspNetCore.App"
+ asset_name="ASP.NET Core Runtime"
+ elif [ -z "$runtime" ]; then
+ asset_relative_path="sdk"
+ asset_name=".NET Core SDK"
+ else
+ say_err "Invalid value for \$runtime"
+ return 1
+ fi
+
+ # Check if the SDK version is already installed.
+ if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$specific_version"; then
+ say "$asset_name version $specific_version is already installed."
+ return 0
+ fi
+
+ mkdir -p "$install_root"
+ zip_path="$(mktemp "$temporary_file_template")"
+ say_verbose "Zip path: $zip_path"
+
+ say "Downloading link: $download_link"
+
+ # Failures are normal in the non-legacy case for ultimately legacy downloads.
+ # Do not output to stderr, since output to stderr is considered an error.
+ download "$download_link" "$zip_path" 2>&1 || download_failed=true
+
+ # if the download fails, download the legacy_download_link
+ if [ "$download_failed" = true ]; then
+ say "Cannot download: $download_link"
+
+ if [ "$valid_legacy_download_link" = true ]; then
+ download_failed=false
+ download_link="$legacy_download_link"
+ # Fresh temp file for the retry; the first one is left for TMP cleanup.
+ zip_path="$(mktemp "$temporary_file_template")"
+ say_verbose "Legacy zip path: $zip_path"
+ say "Downloading legacy link: $download_link"
+ download "$download_link" "$zip_path" 2>&1 || download_failed=true
+
+ if [ "$download_failed" = true ]; then
+ say "Cannot download: $download_link"
+ fi
+ fi
+ fi
+
+ if [ "$download_failed" = true ]; then
+ say_err "Could not find/download: \`$asset_name\` with version = $specific_version"
+ say_err "Refer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support"
+ return 1
+ fi
+
+ say "Extracting zip from $download_link"
+ extract_dotnet_package "$zip_path" "$install_root"
+
+ # Check if the SDK version is now installed; if not, fail the installation.
+ if ! is_dotnet_package_installed "$install_root" "$asset_relative_path" "$specific_version"; then
+ say_err "\`$asset_name\` with version = $specific_version failed to install with an unknown error."
+ return 1
+ fi
+
+ return 0
+}
+
+# Preserve the original argument vector (used when re-invoking/logging).
+args=("$@")
+
+# Fixed relative paths and the mktemp template used throughout the script.
+local_version_file_relative_path="/.version"
+bin_folder_relative_path=""
+temporary_file_template="${TMPDIR:-/tmp}/dotnet.XXXXXXXXX"
+
+# Defaults for the command-line options parsed by the case statement below.
+channel="LTS"
+version="Latest"
+install_dir=""
+architecture=""
+dry_run=false
+no_path=false
+no_cdn=false
+azure_feed="https://dotnetcli.azureedge.net/dotnet"
+uncached_feed="https://dotnetcli.blob.core.windows.net/dotnet"
+feed_credential=""
+verbose=false
+runtime=""
+runtime_id=""
+override_non_versioned_files=true
+# Accumulates pass-through flags for the "repeatable invocation" dry-run output.
+non_dynamic_parameters=""
+
+while [ $# -ne 0 ]
+do
+ name="$1"
+ case "$name" in
+ -c|--channel|-[Cc]hannel)
+ shift
+ channel="$1"
+ ;;
+ -v|--version|-[Vv]ersion)
+ shift
+ version="$1"
+ ;;
+ -i|--install-dir|-[Ii]nstall[Dd]ir)
+ shift
+ install_dir="$1"
+ ;;
+ --arch|--architecture|-[Aa]rch|-[Aa]rchitecture)
+ shift
+ architecture="$1"
+ ;;
+ --shared-runtime|-[Ss]hared[Rr]untime)
+ say_warning "The --shared-runtime flag is obsolete and may be removed in a future version of this script. The recommended usage is to specify '--runtime dotnet'."
+ if [ -z "$runtime" ]; then
+ runtime="dotnet"
+ fi
+ ;;
+ --runtime|-[Rr]untime)
+ shift
+ runtime="$1"
+ if [[ "$runtime" != "dotnet" ]] && [[ "$runtime" != "aspnetcore" ]]; then
+ say_err "Unsupported value for --runtime: '$1'. Valid values are 'dotnet' and 'aspnetcore'."
+ if [[ "$runtime" == "windowsdesktop" ]]; then
+ say_err "WindowsDesktop archives are manufactured for Windows platforms only."
+ fi
+ exit 1
+ fi
+ ;;
+ --dry-run|-[Dd]ry[Rr]un)
+ dry_run=true
+ ;;
+ --no-path|-[Nn]o[Pp]ath)
+ no_path=true
+ non_dynamic_parameters+=" $name"
+ ;;
+ --verbose|-[Vv]erbose)
+ verbose=true
+ non_dynamic_parameters+=" $name"
+ ;;
+ --no-cdn|-[Nn]o[Cc]dn)
+ no_cdn=true
+ non_dynamic_parameters+=" $name"
+ ;;
+ --azure-feed|-[Aa]zure[Ff]eed)
+ shift
+ azure_feed="$1"
+ non_dynamic_parameters+=" $name "\""$1"\"""
+ ;;
+ --uncached-feed|-[Uu]ncached[Ff]eed)
+ shift
+ uncached_feed="$1"
+ non_dynamic_parameters+=" $name "\""$1"\"""
+ ;;
+ --feed-credential|-[Ff]eed[Cc]redential)
+ shift
+ feed_credential="$1"
+ non_dynamic_parameters+=" $name "\""$1"\"""
+ ;;
+ --runtime-id|-[Rr]untime[Ii]d)
+ shift
+ runtime_id="$1"
+ non_dynamic_parameters+=" $name "\""$1"\"""
+ ;;
+ --skip-non-versioned-files|-[Ss]kip[Nn]on[Vv]ersioned[Ff]iles)
+ override_non_versioned_files=false
+ non_dynamic_parameters+=" $name"
+ ;;
+ -?|--?|-h|--help|-[Hh]elp)
+ script_name="$(basename "$0")"
+ echo ".NET Tools Installer"
+ echo "Usage: $script_name [-c|--channel ] [-v|--version ] [-p|--prefix ]"
+ echo " $script_name -h|-?|--help"
+ echo ""
+ echo "$script_name is a simple command line interface for obtaining dotnet cli."
+ echo ""
+ echo "Options:"
+ echo " -c,--channel Download from the channel specified, Defaults to \`$channel\`."
+ echo " -Channel"
+ echo " Possible values:"
+ echo " - Current - most current release"
+ echo " - LTS - most current supported release"
+ echo " - 2-part version in a format A.B - represents a specific release"
+ echo " examples: 2.0; 1.0"
+ echo " - Branch name"
+ echo " examples: release/2.0.0; Master"
+ echo " Note: The version parameter overrides the channel parameter."
+ echo " -v,--version Use specific VERSION, Defaults to \`$version\`."
+ echo " -Version"
+ echo " Possible values:"
+ echo " - latest - most latest build on specific channel"
+ echo " - coherent - most latest coherent build on specific channel"
+ echo " coherent applies only to SDK downloads"
+ echo " - 3-part version in a format A.B.C - represents specific version of build"
+ echo " examples: 2.0.0-preview2-006120; 1.1.0"
+ echo " -i,--install-dir Install under specified location (see Install Location below)"
+ echo " -InstallDir"
+ echo " --architecture Architecture of dotnet binaries to be installed, Defaults to \`$architecture\`."
+ echo " --arch,-Architecture,-Arch"
+ echo " Possible values: x64, arm, and arm64"
+ echo " --runtime Installs a shared runtime only, without the SDK."
+ echo " -Runtime"
+ echo " Possible values:"
+ echo " - dotnet - the Microsoft.NETCore.App shared runtime"
+ echo " - aspnetcore - the Microsoft.AspNetCore.App shared runtime"
+ echo " --skip-non-versioned-files Skips non-versioned files if they already exist, such as the dotnet executable."
+ echo " -SkipNonVersionedFiles"
+ echo " --dry-run,-DryRun Do not perform installation. Display download link."
+ echo " --no-path, -NoPath Do not set PATH for the current process."
+ echo " --verbose,-Verbose Display diagnostics information."
+ echo " --azure-feed,-AzureFeed Azure feed location. Defaults to $azure_feed, This parameter typically is not changed by the user."
+ echo " --uncached-feed,-UncachedFeed Uncached feed location. This parameter typically is not changed by the user."
+ echo " --no-cdn,-NoCdn Disable downloading from the Azure CDN, and use the uncached feed directly."
+ echo " --feed-credential,-FeedCredential Azure feed shared access token. This parameter typically is not specified."
+ echo " --runtime-id Installs the .NET Tools for the given platform (use linux-x64 for portable linux)."
+ echo " -RuntimeId"
+ echo " -?,--?,-h,--help,-Help Shows this help message"
+ echo ""
+ echo "Obsolete parameters:"
+ echo " --shared-runtime The recommended alternative is '--runtime dotnet'."
+ echo " -SharedRuntime Installs just the shared runtime bits, not the entire SDK."
+ echo ""
+ echo "Install Location:"
+ echo " Location is chosen in following order:"
+ echo " - --install-dir option"
+ echo " - Environmental variable DOTNET_INSTALL_DIR"
+ echo " - $HOME/.dotnet"
+ exit 0
+ ;;
+ *)
+ say_err "Unknown argument \`$name\`"
+ exit 1
+ ;;
+ esac
+
+ shift
+done
+
+if [ "$no_cdn" = true ]; then
+ azure_feed="$uncached_feed"
+fi
+
+check_min_reqs
+calculate_vars
+script_name=$(basename "$0")
+
+if [ "$dry_run" = true ]; then
+ say "Payload URLs:"
+ say "Primary named payload URL: $download_link"
+ if [ "$valid_legacy_download_link" = true ]; then
+ say "Legacy named payload URL: $legacy_download_link"
+ fi
+ repeatable_command="./$script_name --version "\""$specific_version"\"" --install-dir "\""$install_root"\"" --architecture "\""$normalized_architecture"\"""
+ if [[ "$runtime" == "dotnet" ]]; then
+ repeatable_command+=" --runtime "\""dotnet"\"""
+ elif [[ "$runtime" == "aspnetcore" ]]; then
+ repeatable_command+=" --runtime "\""aspnetcore"\"""
+ fi
+ repeatable_command+="$non_dynamic_parameters"
+ say "Repeatable invocation: $repeatable_command"
+ exit 0
+fi
+
+check_pre_reqs
+install_dotnet
+
+bin_path="$(get_absolute_path "$(combine_paths "$install_root" "$bin_folder_relative_path")")"
+if [ "$no_path" = false ]; then
+ say "Adding to current process PATH: \`$bin_path\`. Note: This change will be visible only when sourcing script."
+ export PATH="$bin_path":"$PATH"
+else
+ say "Binaries of dotnet can be found in $bin_path"
+fi
+
+say "Installation finished successfully."
diff --git a/src/Misc/externals.sh b/src/Misc/externals.sh
new file mode 100755
index 00000000000..0f37cc254ba
--- /dev/null
+++ b/src/Misc/externals.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+PACKAGERUNTIME=$1
+PRECACHE=$2
+
+NODE_URL=https://nodejs.org/dist
+NODE12_VERSION="12.4.0"
+
+get_abs_path() {
+ # pwd prints an absolute path when run with no args: cd into the parent,
+ # capture pwd, then reattach the basename.
+ local parent_dir
+ parent_dir="$(cd "$(dirname "$1")" && pwd)"
+ echo "${parent_dir}/$(basename "$1")"
+}
+
+LAYOUT_DIR=$(get_abs_path "$(dirname $0)/../../_layout")
+DOWNLOAD_DIR="$(get_abs_path "$(dirname $0)/../../_downloads")/netcore2x"
+
+# Print an error message (default: "Undefined error") to stderr and abort.
+function failed() {
+ local error=${1:-Undefined error}
+ echo "Failed: $error" >&2
+ exit 1
+}
+
+# Abort via failed() when the previous command failed. Relies on being
+# invoked as `some_cmd || checkRC 'some_cmd'`, so that $? on entry still
+# holds some_cmd's exit status. $1 names the command for the error message.
+function checkRC() {
+ local rc=$?
+ if [ $rc -ne 0 ]; then
+ failed "${1} failed with return code $rc"
+ fi
+}
+
+function acquireExternalTool() {
+ local download_source=$1 # E.g. https://github.com/microsoft/vswhere/releases/download/2.6.7/vswhere.exe
+ local target_dir="$LAYOUT_DIR/externals/$2" # E.g. $LAYOUT_DIR/externals/vswhere
+ local fix_nested_dir=$3 # Flag that indicates whether to move nested contents up one directory.
+
+ # Extract the portion of the URL after the protocol. E.g. github.com/microsoft/vswhere/releases/download/2.6.7/vswhere.exe
+ local relative_url="${download_source#*://}"
+
+ # Check if the download already exists.
+ local download_target="$DOWNLOAD_DIR/$relative_url"
+ local download_basename="$(basename "$download_target")"
+ local download_dir="$(dirname "$download_target")"
+
+ if [[ "$PRECACHE" != "" ]]; then
+ if [ -f "$download_target" ]; then
+ echo "Download exists: $download_basename"
+ else
+ # Delete any previous partial file.
+ local partial_target="$DOWNLOAD_DIR/partial/$download_basename"
+ mkdir -p "$(dirname "$partial_target")" || checkRC 'mkdir'
+ if [ -f "$partial_target" ]; then
+ rm "$partial_target" || checkRC 'rm'
+ fi
+
+ # Download from source to the partial file.
+ echo "Downloading $download_source"
+ mkdir -p "$(dirname "$download_target")" || checkRC 'mkdir'
+ # curl -f Fail silently (no output at all) on HTTP errors (H)
+ # -k Allow connections to SSL sites without certs (H)
+ # -S Show error. With -s, make curl show errors when they occur
+ # -L Follow redirects (H)
+ # -o FILE Write to FILE instead of stdout
+ curl -fkSL -o "$partial_target" "$download_source" 2>"${download_target}_download.log" || checkRC 'curl'
+
+ # Move the partial file to the download target.
+ mv "$partial_target" "$download_target" || checkRC 'mv'
+
+ # Extract to current directory
+ # Ensure we can extract those files
+ # We might use them during dev.sh
+ if [[ "$download_basename" == *.zip ]]; then
+ # Extract the zip.
+ echo "Testing zip"
+ unzip "$download_target" -d "$download_dir" > /dev/null
+ local rc=$?
+ if [[ $rc -ne 0 && $rc -ne 1 ]]; then
+ failed "unzip failed with return code $rc"
+ fi
+ elif [[ "$download_basename" == *.tar.gz ]]; then
+ # Extract the tar gz.
+ echo "Testing tar gz"
+ tar xzf "$download_target" -C "$download_dir" > /dev/null || checkRC 'tar'
+ fi
+ fi
+ else
+ # Extract to layout.
+ mkdir -p "$target_dir" || checkRC 'mkdir'
+ local nested_dir=""
+ if [[ "$download_basename" == *.zip ]]; then
+ # Extract the zip.
+ echo "Extracting zip to layout"
+ unzip "$download_target" -d "$target_dir" > /dev/null
+ local rc=$?
+ if [[ $rc -ne 0 && $rc -ne 1 ]]; then
+ failed "unzip failed with return code $rc"
+ fi
+
+ # Capture the nested directory path if the fix_nested_dir flag is set.
+ if [[ "$fix_nested_dir" == "fix_nested_dir" ]]; then
+ nested_dir="${download_basename%.zip}" # Remove the trailing ".zip".
+ fi
+ elif [[ "$download_basename" == *.tar.gz ]]; then
+ # Extract the tar gz.
+ echo "Extracting tar gz to layout"
+ tar xzf "$download_target" -C "$target_dir" > /dev/null || checkRC 'tar'
+
+ # Capture the nested directory path if the fix_nested_dir flag is set.
+ if [[ "$fix_nested_dir" == "fix_nested_dir" ]]; then
+ nested_dir="${download_basename%.tar.gz}" # Remove the trailing ".tar.gz".
+ fi
+ else
+ # Copy the file.
+ echo "Copying to layout"
+ cp "$download_target" "$target_dir/" || checkRC 'cp'
+ fi
+
+ # Fixup the nested directory.
+ if [[ "$nested_dir" != "" ]]; then
+ if [ -d "$target_dir/$nested_dir" ]; then
+ mv "$target_dir/$nested_dir"/* "$target_dir/" || checkRC 'mv'
+ rmdir "$target_dir/$nested_dir" || checkRC 'rmdir'
+ fi
+ fi
+ fi
+}
+
+# Download the external tools only for Windows.
+if [[ "$PACKAGERUNTIME" == "win-x64" ]]; then
+ acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/win-x64/node.exe" node12/bin
+ acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/win-x64/node.lib" node12/bin
+ if [[ "$PRECACHE" != "" ]]; then
+ acquireExternalTool "https://github.com/microsoft/vswhere/releases/download/2.6.7/vswhere.exe" vswhere
+ fi
+fi
+
+# Download the external tools only for OSX.
+if [[ "$PACKAGERUNTIME" == "osx-x64" ]]; then
+ acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-darwin-x64.tar.gz" node12 fix_nested_dir
+fi
+
+# Download the external tools common across Linux PACKAGERUNTIMEs (excluding OSX).
+if [[ "$PACKAGERUNTIME" == "linux-x64" || "$PACKAGERUNTIME" == "rhel.6-x64" ]]; then
+ acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-linux-x64.tar.gz" node12 fix_nested_dir
+ # TODO: Repath this blob to use a consistent version format (_ vs .)
+ acquireExternalTool "https://vstsagenttools.blob.core.windows.net/tools/nodejs/12_4_0/alpine/node-v${NODE12_VERSION}-alpine.tar.gz" node12_alpine
+fi
+
+if [[ "$PACKAGERUNTIME" == "linux-arm" ]]; then
+ acquireExternalTool "$NODE_URL/v${NODE12_VERSION}/node-v${NODE12_VERSION}-linux-armv7l.tar.gz" node12 fix_nested_dir
+fi
diff --git a/src/Misc/layoutbin/RunnerService.js b/src/Misc/layoutbin/RunnerService.js
new file mode 100644
index 00000000000..9af34397e29
--- /dev/null
+++ b/src/Misc/layoutbin/RunnerService.js
@@ -0,0 +1,91 @@
+#!/usr/bin/env node
+// Copyright (c) GitHub. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+var childProcess = require("child_process");
+var path = require("path")
+
+var supported = ['linux', 'darwin']
+
+if (supported.indexOf(process.platform) == -1) {
+ console.log('Unsupported platform: ' + process.platform);
+ console.log('Supported platforms are: ' + supported.toString());
+ process.exit(1);
+}
+
+var stopping = false;
+var listener = null;
+
+// Spawn the Runner.Listener child process and supervise it: mirror its
+// output, and on exit either stop (codes 0/1) or re-launch after 5 seconds
+// (codes 2/3 and anything unrecognized), unless a shutdown is in progress.
+var runService = function() {
+ var listenerExePath = path.join(__dirname, '../bin/Runner.Listener');
+ var interactive = process.argv[2] === "interactive";
+
+ if(!stopping) {
+ try {
+ if (interactive) {
+ console.log('Starting Runner listener interactively');
+ listener = childProcess.spawn(listenerExePath, ['run'], { env: process.env });
+ } else {
+ console.log('Starting Runner listener with startup type: service');
+ listener = childProcess.spawn(listenerExePath, ['run', '--startuptype', 'service'], { env: process.env });
+ }
+
+ console.log('Started listener process');
+
+ listener.stdout.on('data', (data) => {
+ process.stdout.write(data.toString('utf8'));
+ });
+
+ // NOTE(review): stderr is intentionally forwarded to stdout here.
+ listener.stderr.on('data', (data) => {
+ process.stdout.write(data.toString('utf8'));
+ });
+
+ listener.on('close', (code) => {
+ // NOTE(review): this line says "error code" even for a clean exit
+ // (code 0) — the specific meaning is logged just below.
+ console.log(`Runner listener exited with error code ${code}`);
+
+ if (code === 0) {
+ console.log('Runner listener exit with 0 return code, stop the service, no retry needed.');
+ stopping = true;
+ } else if (code === 1) {
+ console.log('Runner listener exit with terminated error, stop the service, no retry needed.');
+ stopping = true;
+ } else if (code === 2) {
+ console.log('Runner listener exit with retryable error, re-launch runner in 5 seconds.');
+ } else if (code === 3) {
+ console.log('Runner listener exit because of updating, re-launch runner in 5 seconds.');
+ } else {
+ console.log('Runner listener exit with undefined return code, re-launch runner in 5 seconds.');
+ }
+
+ if(!stopping) {
+ setTimeout(runService, 5000);
+ }
+ });
+
+ } catch(ex) {
+ // Spawn failures are logged but not retried.
+ console.log(ex);
+ }
+ }
+}
+
+runService();
+console.log('Started running service');
+
+// Stop the listener child cleanly: mark the supervisor as stopping (so the
+// 'close' handler does not relaunch) and forward SIGINT to the listener.
+// The `code` parameter is currently unused.
+var gracefulShutdown = function(code) {
+ console.log('Shutting down runner listener');
+ stopping = true;
+ if (listener) {
+ console.log('Sending SIGINT to runner listener to stop');
+ listener.kill('SIGINT');
+
+ // TODO wait for 30 seconds and send a SIGKILL
+ }
+}
+
+process.on('SIGINT', () => {
+ gracefulShutdown(0);
+});
+
+process.on('SIGTERM', () => {
+ gracefulShutdown(0);
+});
diff --git a/src/Misc/layoutbin/actions.runner.plist.template b/src/Misc/layoutbin/actions.runner.plist.template
new file mode 100644
index 00000000000..351c1100142
--- /dev/null
+++ b/src/Misc/layoutbin/actions.runner.plist.template
@@ -0,0 +1,27 @@
+
+
+
+
+ Label
+ {{SvcName}}
+ ProgramArguments
+
+ {{RunnerRoot}}/runsvc.sh
+
+ UserName
+ {{User}}
+ WorkingDirectory
+ {{RunnerRoot}}
+ RunAtLoad
+
+ StandardOutPath
+ {{UserHome}}/Library/Logs/{{SvcName}}/stdout.log
+ StandardErrorPath
+ {{UserHome}}/Library/Logs/{{SvcName}}/stderr.log
+ EnvironmentVariables
+
+ ACTIONS_RUNNER_SVC
+ 1
+
+
+
diff --git a/src/Misc/layoutbin/actions.runner.service.template b/src/Misc/layoutbin/actions.runner.service.template
new file mode 100644
index 00000000000..4dcec7e368c
--- /dev/null
+++ b/src/Misc/layoutbin/actions.runner.service.template
@@ -0,0 +1,14 @@
+[Unit]
+Description={{Description}}
+After=network.target
+
+[Service]
+ExecStart={{RunnerRoot}}/runsvc.sh
+User={{User}}
+WorkingDirectory={{RunnerRoot}}
+KillMode=process
+KillSignal=SIGTERM
+TimeoutStopSec=5min
+
+[Install]
+WantedBy=multi-user.target
diff --git a/src/Misc/layoutbin/darwin.svc.sh.template b/src/Misc/layoutbin/darwin.svc.sh.template
new file mode 100644
index 00000000000..5210eb94d81
--- /dev/null
+++ b/src/Misc/layoutbin/darwin.svc.sh.template
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+SVC_NAME="{{SvcNameVar}}"
+SVC_DESCRIPTION="{{SvcDescription}}"
+
+user_id=`id -u`
+
+# launchctl should not run as sudo for launch runners
+if [ $user_id -eq 0 ]; then
+ echo "Must not run with sudo"
+ exit 1
+fi
+
+SVC_CMD=$1
+RUNNER_ROOT=`pwd`
+
+LAUNCH_PATH="${HOME}/Library/LaunchAgents"
+PLIST_PATH="${LAUNCH_PATH}/${SVC_NAME}.plist"
+TEMPLATE_PATH=./bin/actions.runner.plist.template
+TEMP_PATH=./bin/actions.runner.plist.temp
+CONFIG_PATH=.service
+
+# Print an error message (default: "Undefined error") to stderr and abort.
+function failed()
+{
+ local error=${1:-Undefined error}
+ echo "Failed: $error" >&2
+ exit 1
+}
+
+if [ ! -f "${TEMPLATE_PATH}" ]; then
+ failed "Must run from runner root or install is corrupt"
+fi
+
+function install()
+{
+ echo "Creating launch runner in ${PLIST_PATH}"
+
+ if [ ! -d "${LAUNCH_PATH}" ]; then
+ mkdir ${LAUNCH_PATH}
+ fi
+
+ if [ -f "${PLIST_PATH}" ]; then
+ failed "error: exists ${PLIST_PATH}"
+ fi
+
+ if [ -f "${TEMP_PATH}" ]; then
+ rm "${TEMP_PATH}" || failed "failed to delete ${TEMP_PATH}"
+ fi
+
+ log_path="${HOME}/Library/Logs/${SVC_NAME}"
+ echo "Creating ${log_path}"
+ mkdir -p "${log_path}" || failed "failed to create ${log_path}"
+
+ echo Creating ${PLIST_PATH}
+ sed "s/{{User}}/${SUDO_USER:-$USER}/g; s/{{SvcName}}/$SVC_NAME/g; s@{{RunnerRoot}}@${RUNNER_ROOT}@g; s@{{UserHome}}@$HOME@g;" "${TEMPLATE_PATH}" > "${TEMP_PATH}" || failed "failed to create replacement temp file"
+ mv "${TEMP_PATH}" "${PLIST_PATH}" || failed "failed to copy plist"
+
+ # Since we started with sudo, runsvc.sh will be owned by root. Change this to current login user.
+ echo Creating runsvc.sh
+ cp ./bin/runsvc.sh ./runsvc.sh || failed "failed to copy runsvc.sh"
+ chmod u+x ./runsvc.sh || failed "failed to set permission for runsvc.sh"
+
+ echo Creating ${CONFIG_PATH}
+ echo "${PLIST_PATH}" > ${CONFIG_PATH} || failed "failed to create .Service file"
+
+ echo "svc install complete"
+}
+
+# Load the LaunchAgent plist (-w also clears any persisted disabled flag)
+# and report the resulting service status.
+function start()
+{
+ echo "starting ${SVC_NAME}"
+ launchctl load -w "${PLIST_PATH}" || failed "failed to load ${PLIST_PATH}"
+ status
+}
+
+# Unload the LaunchAgent plist (stopping the service) and report status.
+function stop()
+{
+ echo "stopping ${SVC_NAME}"
+ launchctl unload "${PLIST_PATH}" || failed "failed to unload ${PLIST_PATH}"
+ status
+}
+
+function uninstall()
+{
+ echo "uninstalling ${SVC_NAME}"
+ stop
+ rm "${PLIST_PATH}" || failed "failed to delete ${PLIST_PATH}"
+ if [ -f "${CONFIG_PATH}" ]; then
+ rm "${CONFIG_PATH}" || failed "failed to delete ${CONFIG_PATH}"
+ fi
+}
+
+function status()
+{
+ echo "status ${SVC_NAME}:"
+ if [ -f "${PLIST_PATH}" ]; then
+ echo
+ echo "${PLIST_PATH}"
+ else
+ echo
+ echo "not installed"
+ echo
+ return
+ fi
+
+ echo
+ status_out=`launchctl list | grep "${SVC_NAME}"`
+ if [ ! -z "$status_out" ]; then
+ echo Started:
+ echo $status_out
+ echo
+ else
+ echo Stopped
+ echo
+ fi
+}
+
+function usage()
+{
+ echo
+ echo Usage:
+ echo "./svc.sh [install, start, stop, status, uninstall]"
+ echo
+}
+
+case $SVC_CMD in
+ "install") install;;
+ "status") status;;
+ "uninstall") uninstall;;
+ "start") start;;
+ "stop") stop;;
+ *) usage;;
+esac
+
+exit 0
diff --git a/src/Misc/layoutbin/installdependencies.sh b/src/Misc/layoutbin/installdependencies.sh
new file mode 100755
index 00000000000..0375c0a43f2
--- /dev/null
+++ b/src/Misc/layoutbin/installdependencies.sh
@@ -0,0 +1,298 @@
+#!/bin/bash
+
+user_id=`id -u`
+
+if [ $user_id -ne 0 ]; then
+ echo "Need to run with sudo privilege"
+ exit 1
+fi
+
+# Determine OS type
+# Debian based OS (Debian, Ubuntu, Linux Mint) has /etc/debian_version
+# Fedora based OS (Fedora, Redhat, Centos, Oracle Linux 7) has /etc/redhat-release
+# SUSE based OS (OpenSUSE, SUSE Enterprise) has ID_LIKE=suse in /etc/os-release
+
+# Generic failure message pointing at the .NET Core prerequisites docs.
+function print_errormessage()
+{
+ echo "Can't install dotnet core dependencies."
+ echo "You can manually install all required dependencies based on following documentation"
+ echo "https://docs.microsoft.com/en-us/dotnet/core/linux-prerequisites?tabs=netcore2x"
+}
+
+# Partial-success message for RHEL6, where some prerequisites must be
+# installed by hand.
+function print_rhel6message()
+{
+ echo "We did our best effort to install dotnet core dependencies"
+ echo "However, there are some dependencies which require manual installation"
+ echo "You can install all remaining required dependencies based on the following documentation"
+ echo "https://github.com/dotnet/core/blob/master/Documentation/build-and-install-rhel6-prerequisites.md"
+}
+
+# Full-failure message for RHEL6: both the generic prerequisites doc and
+# the RHEL6-specific manual steps.
+function print_rhel6errormessage()
+{
+ echo "We couldn't install dotnet core dependencies"
+ echo "You can manually install all required dependencies based on following documentation"
+ echo "https://docs.microsoft.com/en-us/dotnet/core/linux-prerequisites?tabs=netcore2x"
+ echo "In addition, there are some dependencies which require manual installation. Please follow this documentation"
+ echo "https://github.com/dotnet/core/blob/master/Documentation/build-and-install-rhel6-prerequisites.md"
+}
+
+if [ -e /etc/os-release ]
+then
+ echo "--------OS Information--------"
+ cat /etc/os-release
+ echo "------------------------------"
+
+ if [ -e /etc/debian_version ]
+ then
+ echo "The current OS is Debian based"
+ echo "--------Debian Version--------"
+ cat /etc/debian_version
+ echo "------------------------------"
+
+ # prefer apt over apt-get
+ command -v apt
+ if [ $? -eq 0 ]
+ then
+ apt update && apt install -y liblttng-ust0 libkrb5-3 zlib1g
+ if [ $? -ne 0 ]
+ then
+ echo "'apt' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+
+ # ubuntu 18 uses libcurl4
+ # ubuntu 14, 16 and other linux use libcurl3
+ apt install -y libcurl3 || apt install -y libcurl4
+ if [ $? -ne 0 ]
+ then
+ echo "'apt' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+
+ # debian 9 use libssl1.0.2
+ # other debian linux use libssl1.0.0
+ apt install -y libssl1.0.0 || apt install -y libssl1.0.2
+ if [ $? -ne 0 ]
+ then
+ echo "'apt' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+
+ # libicu version prefer: libicu52 -> libicu55 -> libicu57 -> libicu60
+ apt install -y libicu52 || apt install -y libicu55 || apt install -y libicu57 || apt install -y libicu60
+ if [ $? -ne 0 ]
+ then
+ echo "'apt' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+ else
+ command -v apt-get
+ if [ $? -eq 0 ]
+ then
+ apt-get update && apt-get install -y liblttng-ust0 libkrb5-3 zlib1g
+ if [ $? -ne 0 ]
+ then
+ echo "'apt-get' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+
+ # ubuntu 18 uses libcurl4
+ # ubuntu 14, 16 and other linux use libcurl3
+ apt-get install -y libcurl3 || apt-get install -y libcurl4
+ if [ $? -ne 0 ]
+ then
+ echo "'apt-get' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+
+ # debian 9 use libssl1.0.2
+ # other debian linux use libssl1.0.0
+ # NOTE: this branch is only reached when 'apt' is NOT available, so the
+ # fallbacks must also use 'apt-get' (the original fell back to 'apt').
+ apt-get install -y libssl1.0.0 || apt-get install -y libssl1.0.2
+ if [ $? -ne 0 ]
+ then
+ echo "'apt-get' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+
+ # libicu version prefer: libicu52 -> libicu55 -> libicu57 -> libicu60
+ apt-get install -y libicu52 || apt-get install -y libicu55 || apt-get install -y libicu57 || apt-get install -y libicu60
+ if [ $? -ne 0 ]
+ then
+ echo "'apt-get' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+ else
+ echo "Can not find 'apt' or 'apt-get'"
+ print_errormessage
+ exit 1
+ fi
+ fi
+ elif [ -e /etc/redhat-release ]
+ then
+ echo "The current OS is Fedora based"
+ echo "--------Redhat Version--------"
+ cat /etc/redhat-release
+ echo "------------------------------"
+
+ # use dnf on fedora
+ # use yum on centos and redhat
+ if [ -e /etc/fedora-release ]
+ then
+ command -v dnf
+ if [ $? -eq 0 ]
+ then
+ useCompatSsl=0
+ grep -i 'fedora release 28' /etc/fedora-release
+ if [ $? -eq 0 ]
+ then
+ useCompatSsl=1
+ else
+ grep -i 'fedora release 27' /etc/fedora-release
+ if [ $? -eq 0 ]
+ then
+ useCompatSsl=1
+ else
+ grep -i 'fedora release 26' /etc/fedora-release
+ if [ $? -eq 0 ]
+ then
+ useCompatSsl=1
+ fi
+ fi
+ fi
+
+ if [ $useCompatSsl -eq 1 ]
+ then
+ echo "Use compat-openssl10-devel instead of openssl-devel for Fedora 27/28 (dotnet core requires openssl 1.0.x)"
+ dnf install -y compat-openssl10
+ if [ $? -ne 0 ]
+ then
+ echo "'dnf' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+ else
+ dnf install -y openssl-libs
+ if [ $? -ne 0 ]
+ then
+ echo "'dnf' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+ fi
+
+ dnf install -y lttng-ust libcurl krb5-libs zlib libicu
+ if [ $? -ne 0 ]
+ then
+ echo "'dnf' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+ else
+ echo "Can not find 'dnf'"
+ print_errormessage
+ exit 1
+ fi
+ else
+ command -v yum
+ if [ $? -eq 0 ]
+ then
+ yum install -y openssl-libs libcurl krb5-libs zlib libicu
+ if [ $? -ne 0 ]
+ then
+ echo "'yum' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+
+ # install lttng-ust separately since it's not part of the official package repository
+ yum install -y wget && wget -P /etc/yum.repos.d/ https://packages.efficios.com/repo.files/EfficiOS-RHEL7-x86-64.repo && rpmkeys --import https://packages.efficios.com/rhel/repo.key && yum updateinfo && yum install -y lttng-ust
+ if [ $? -ne 0 ]
+ then
+ echo "'lttng-ust' installation failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+ else
+ echo "Can not find 'yum'"
+ print_errormessage
+ exit 1
+ fi
+ fi
+ else
+ # we might on OpenSUSE
+ OSTYPE=$(grep ID_LIKE /etc/os-release | cut -f2 -d=)
+ echo $OSTYPE
+ if [ $OSTYPE == '"suse"' ]
+ then
+ echo "The current OS is SUSE based"
+ command -v zypper
+ if [ $? -eq 0 ]
+ then
+ zypper -n install lttng-ust libopenssl1_0_0 libcurl4 krb5 zlib libicu52_1
+ if [ $? -ne 0 ]
+ then
+ echo "'zypper' failed with exit code '$?'"
+ print_errormessage
+ exit 1
+ fi
+ else
+ echo "Can not find 'zypper'"
+ print_errormessage
+ exit 1
+ fi
+ else
+ echo "Can't detect current OS type based on /etc/os-release."
+ print_errormessage
+ exit 1
+ fi
+ fi
+elif [ -e /etc/redhat-release ]
+# RHEL6 doesn't have an os-release file defined, read redhat-release instead
+then
+ redhatRelease=$(&2
+ exit 1
+}
+
+if [ ! -f "${TEMPLATE_PATH}" ]; then
+ failed "Must run from runner root or install is corrupt"
+fi
+
+#check if we run as root
+if [[ $(id -u) != "0" ]]; then
+ echo "Failed: This script requires to run with sudo." >&2
+ exit 1
+fi
+
+# Generate the systemd unit file from the template, install it, and
+# enable the service. Refuses to overwrite an existing unit file.
+# Optionally takes a username (arg_2); defaults to the sudo caller.
+function install()
+{
+ echo "Creating launch runner in ${UNIT_PATH}"
+ if [ -f "${UNIT_PATH}" ]; then
+ failed "error: exists ${UNIT_PATH}"
+ fi
+
+ if [ -f "${TEMP_PATH}" ]; then
+ rm "${TEMP_PATH}" || failed "failed to delete ${TEMP_PATH}"
+ fi
+
+ # can optionally use username supplied
+ run_as_user=${arg_2:-$SUDO_USER}
+ echo "Run as user: ${run_as_user}"
+
+ run_as_uid=$(id -u ${run_as_user}) || failed "User does not exist"
+ echo "Run as uid: ${run_as_uid}"
+
+ run_as_gid=$(id -g ${run_as_user}) || failed "Group not available"
+ echo "gid: ${run_as_gid}"
+
+ # Substitute template placeholders. The inner sed calls escape '/' and
+ # '&' in the description/root values so they are safe inside the outer
+ # sed replacement text.
+ sed "s/{{User}}/${run_as_user}/g; s/{{Description}}/$(echo ${SVC_DESCRIPTION} | sed -e 's/[\/&]/\\&/g')/g; s/{{RunnerRoot}}/$(echo ${RUNNER_ROOT} | sed -e 's/[\/&]/\\&/g')/g;" "${TEMPLATE_PATH}" > "${TEMP_PATH}" || failed "failed to create replacement temp file"
+ mv "${TEMP_PATH}" "${UNIT_PATH}" || failed "failed to copy unit file"
+
+ # unit file should not be executable and world writable
+ chmod 664 ${UNIT_PATH} || failed "failed to set permissions on ${UNIT_PATH}"
+ systemctl daemon-reload || failed "failed to reload daemons"
+
+ # Since we started with sudo, runsvc.sh will be owned by root. Change this to current login user.
+ cp ./bin/runsvc.sh ./runsvc.sh || failed "failed to copy runsvc.sh"
+ chown ${run_as_uid}:${run_as_gid} ./runsvc.sh || failed "failed to set owner for runsvc.sh"
+ chmod 755 ./runsvc.sh || failed "failed to set permission for runsvc.sh"
+
+ systemctl enable ${SVC_NAME} || failed "failed to enable ${SVC_NAME}"
+
+ # Record the installed service name so later script runs can find it.
+ echo "${SVC_NAME}" > ${CONFIG_PATH} || failed "failed to create .service file"
+ chown ${run_as_uid}:${run_as_gid} ${CONFIG_PATH} || failed "failed to set permission for ${CONFIG_PATH}"
+}
+
+# Start the systemd service and report status.
+function start()
+{
+ systemctl start ${SVC_NAME} || failed "failed to start ${SVC_NAME}"
+ status
+}
+
+# Stop the systemd service and report status.
+function stop()
+{
+ systemctl stop ${SVC_NAME} || failed "failed to stop ${SVC_NAME}"
+ status
+}
+
+# Stop and disable the service, remove the unit file and the saved
+# config file (if present), then reload systemd so the removal sticks.
+function uninstall()
+{
+ stop
+ systemctl disable ${SVC_NAME} || failed "failed to disable ${SVC_NAME}"
+ rm "${UNIT_PATH}" || failed "failed to delete ${UNIT_PATH}"
+ if [ -f "${CONFIG_PATH}" ]; then
+ rm "${CONFIG_PATH}" || failed "failed to delete ${CONFIG_PATH}"
+ fi
+ systemctl daemon-reload || failed "failed to reload daemons"
+}
+
+# Report "not installed" if the unit file is missing; otherwise defer
+# to `systemctl status` (--no-pager keeps output script-friendly).
+function status()
+{
+ if [ -f "${UNIT_PATH}" ]; then
+ echo
+ echo "${UNIT_PATH}"
+ else
+ echo
+ echo "not installed"
+ echo
+ return
+ fi
+
+ systemctl --no-pager status ${SVC_NAME}
+}
+
+# Print the supported sub-commands and what each one does.
+function usage()
+{
+ echo
+ echo Usage:
+ echo "./svc.sh [install, start, stop, status, uninstall]"
+ echo "Commands:"
+ echo " install [user]: Install runner service as Root or specified user."
+ echo " start: Manually start the runner service."
+ echo " stop: Manually stop the runner service."
+ echo " status: Display status of runner service."
+ echo " uninstall: Uninstall runner service."
+ echo
+}
+
+case $SVC_CMD in
+ "install") install;;
+ "status") status;;
+ "uninstall") uninstall;;
+ "start") start;;
+ "stop") stop;;
+ "status") status;;
+ *) usage;;
+esac
+
+exit 0
diff --git a/src/Misc/layoutbin/update.cmd.template b/src/Misc/layoutbin/update.cmd.template
new file mode 100644
index 00000000000..70d86fd60a7
--- /dev/null
+++ b/src/Misc/layoutbin/update.cmd.template
@@ -0,0 +1,143 @@
+@echo off
+
+rem runner will replace key words in the template and generate a batch script to run.
+rem Keywords:
+rem PROCESSID = pid
+rem RUNNERPROCESSNAME = Runner.Listener[.exe]
+rem ROOTFOLDER = ./
+rem EXISTRUNNERVERSION = 2.100.0
+rem DOWNLOADRUNNERVERSION = 2.101.0
+rem UPDATELOG = _diag/SelfUpdate-UTC.log
+rem RESTARTINTERACTIVERUNNER = 0/1
+
+setlocal
+set runnerpid=_PROCESS_ID_
+set runnerprocessname=_RUNNER_PROCESS_NAME_
+set rootfolder=_ROOT_FOLDER_
+set existrunnerversion=_EXIST_RUNNER_VERSION_
+set downloadrunnerversion=_DOWNLOAD_RUNNER_VERSION_
+set logfile=_UPDATE_LOG_
+set restartinteractiverunner=_RESTART_INTERACTIVE_RUNNER_
+
+rem log user who run the script
+echo [%date% %time%] --------whoami-------- >> "%logfile%" 2>&1
+whoami >> "%logfile%" 2>&1
+echo [%date% %time%] --------whoami-------- >> "%logfile%" 2>&1
+
+rem wait for runner process to exit.
+echo [%date% %time%] Waiting for %runnerprocessname% (%runnerpid%) to complete >> "%logfile%" 2>&1
+:loop
+tasklist /fi "pid eq %runnerpid%" | find /I "%runnerprocessname%" >> "%logfile%" 2>&1
+if ERRORLEVEL 1 (
+ goto copy
+)
+
+echo [%date% %time%] Process %runnerpid% still running, check again after 1 second. >> "%logfile%" 2>&1
+ping -n 2 127.0.0.1 >nul
+goto loop
+
+rem start re-organize folders
+:copy
+echo [%date% %time%] Process %runnerpid% finished running >> "%logfile%" 2>&1
+echo [%date% %time%] Sleep 1 more second to make sure process exited >> "%logfile%" 2>&1
+ping -n 2 127.0.0.1 >nul
+echo [%date% %time%] Re-organize folders >> "%logfile%" 2>&1
+
+rem the folder structure under runner root will be
+rem ./bin -> bin.2.100.0 (junction folder)
+rem ./externals -> externals.2.100.0 (junction folder)
+rem ./bin.2.100.0
+rem ./externals.2.100.0
+rem ./bin.2.99.0
+rem ./externals.2.99.0
+rem by using the junction folder we can avoid the file-in-use problem.
+
+rem if the bin/externals junction point already exists, we just need to delete the junction point then re-create it to point to the new bin/externals folder.
+rem if the bin/externals still are real folders, we need to rename the existing folder to bin.version format then create junction point to new bin/externals folder.
+
+rem check bin folder
+rem we do findstr /C:" bin" since in migration mode, we create a junction folder from runner to bin.
+rem as result, dir /AL | findstr "bin" will return the runner folder. output looks like (07/27/2016 05:21 PM runner [E:\bin])
+dir "%rootfolder%" /AL 2>&1 | findstr /C:" bin" >> "%logfile%" 2>&1
+if ERRORLEVEL 1 (
+ rem return code 1 means it can't find a bin folder that is a junction folder
+ rem so we need to move the current bin folder to bin.2.99.0 folder.
+ echo [%date% %time%] move "%rootfolder%\bin" "%rootfolder%\bin.%existrunnerversion%" >> "%logfile%" 2>&1
+ move "%rootfolder%\bin" "%rootfolder%\bin.%existrunnerversion%" >> "%logfile%" 2>&1
+ if ERRORLEVEL 1 (
+ echo [%date% %time%] Can't move "%rootfolder%\bin" to "%rootfolder%\bin.%existrunnerversion%" >> "%logfile%" 2>&1
+ goto fail
+ )
+
+) else (
+ rem otherwise it find a bin folder that is a junction folder
+ rem we just need to delete the junction point.
+ echo [%date% %time%] Delete existing junction bin folder >> "%logfile%" 2>&1
+ rmdir "%rootfolder%\bin" >> "%logfile%" 2>&1
+ if ERRORLEVEL 1 (
+ echo [%date% %time%] Can't delete existing junction bin folder >> "%logfile%" 2>&1
+ goto fail
+ )
+)
+
+rem check externals folder
+dir "%rootfolder%" /AL 2>&1 | findstr "externals" >> "%logfile%" 2>&1
+if ERRORLEVEL 1 (
+ rem return code 1 means it can't find a externals folder that is a junction folder
+ rem so we need to move the current externals folder to externals.2.99.0 folder.
+ echo [%date% %time%] move "%rootfolder%\externals" "%rootfolder%\externals.%existrunnerversion%" >> "%logfile%" 2>&1
+ move "%rootfolder%\externals" "%rootfolder%\externals.%existrunnerversion%" >> "%logfile%" 2>&1
+ if ERRORLEVEL 1 (
+ echo [%date% %time%] Can't move "%rootfolder%\externals" to "%rootfolder%\externals.%existrunnerversion%" >> "%logfile%" 2>&1
+ goto fail
+ )
+) else (
+ rem otherwise it find a externals folder that is a junction folder
+ rem we just need to delete the junction point.
+ echo [%date% %time%] Delete existing junction externals folder >> "%logfile%" 2>&1
+ rmdir "%rootfolder%\externals" >> "%logfile%" 2>&1
+ if ERRORLEVEL 1 (
+ echo [%date% %time%] Can't delete existing junction externals folder >> "%logfile%" 2>&1
+ goto fail
+ )
+)
+
+rem create junction bin folder
+echo [%date% %time%] Create junction bin folder >> "%logfile%" 2>&1
+mklink /J "%rootfolder%\bin" "%rootfolder%\bin.%downloadrunnerversion%" >> "%logfile%" 2>&1
+if ERRORLEVEL 1 (
+ echo [%date% %time%] Can't create junction bin folder >> "%logfile%" 2>&1
+ goto fail
+)
+
+rem create junction externals folder
+echo [%date% %time%] Create junction externals folder >> "%logfile%" 2>&1
+mklink /J "%rootfolder%\externals" "%rootfolder%\externals.%downloadrunnerversion%" >> "%logfile%" 2>&1
+if ERRORLEVEL 1 (
+ echo [%date% %time%] Can't create junction externals folder >> "%logfile%" 2>&1
+ goto fail
+)
+
+echo [%date% %time%] Update succeed >> "%logfile%" 2>&1
+
+rem rename the update log file with %logfile%.succeed/.failed/succeedneedrestart
+rem the runner service host can determine the result of the runner update based on the log file name
+echo [%date% %time%] Rename "%logfile%" to be "%logfile%.succeed" >> "%logfile%" 2>&1
+move "%logfile%" "%logfile%.succeed" >nul
+
+rem restart interactive runner if needed
+if %restartinteractiverunner% equ 1 (
+ echo [%date% %time%] Restart interactive runner >> "%logfile%.succeed" 2>&1
+ endlocal
+ start "Actions Runner" cmd.exe /k "_ROOT_FOLDER_\run.cmd"
+) else (
+ endlocal
+)
+
+goto :eof
+
+:fail
+echo [%date% %time%] Rename "%logfile%" to be "%logfile%.failed" >> "%logfile%" 2>&1
+move "%logfile%" "%logfile%.failed" >nul
+goto :eof
+
diff --git a/src/Misc/layoutbin/update.sh.template b/src/Misc/layoutbin/update.sh.template
new file mode 100644
index 00000000000..c09cc1d5b4c
--- /dev/null
+++ b/src/Misc/layoutbin/update.sh.template
@@ -0,0 +1,133 @@
+#!/bin/bash
+
+# runner will replace key words in the template and generate a batch script to run.
+# Keywords:
+# PROCESSID = pid
+# RUNNERPROCESSNAME = Runner.Listener[.exe]
+# ROOTFOLDER = ./
+# EXISTRUNNERVERSION = 2.100.0
+# DOWNLOADRUNNERVERSION = 2.101.0
+# UPDATELOG = _diag/SelfUpdate-UTC.log
+# RESTARTINTERACTIVERUNNER = 0/1
+
+runnerpid=_PROCESS_ID_
+runnerprocessname=_RUNNER_PROCESS_NAME_
+rootfolder="_ROOT_FOLDER_"
+existrunnerversion=_EXIST_RUNNER_VERSION_
+downloadrunnerversion=_DOWNLOAD_RUNNER_VERSION_
+logfile="_UPDATE_LOG_"
+restartinteractiverunner=_RESTART_INTERACTIVE_RUNNER_
+
+# log user who run the script
+date "+[%F %T-%4N] --------whoami--------" >> "$logfile" 2>&1
+whoami >> "$logfile" 2>&1
+date "+[%F %T-%4N] --------whoami--------" >> "$logfile" 2>&1
+
+# wait for runner process to exit.
+date "+[%F %T-%4N] Waiting for $runnerprocessname ($runnerpid) to complete" >> "$logfile" 2>&1
+while [ -e /proc/$runnerpid ]
+do
+ date "+[%F %T-%4N] Process $runnerpid still running" >> "$logfile" 2>&1
+ # ping is used as a portable ~1s sleep. Fix: '>nul' is Windows syntax
+ # and would create a literal file named "nul" here; use /dev/null.
+ ping -c 2 127.0.0.1 > /dev/null
+done
+date "+[%F %T-%4N] Process $runnerpid finished running" >> "$logfile" 2>&1
+
+# start re-organize folders
+date "+[%F %T-%4N] Sleep 1 more second to make sure process exited" >> "$logfile" 2>&1
+ping -c 2 127.0.0.1 > /dev/null
+
+# the folder structure under runner root will be
+# ./bin -> bin.2.100.0 (junction folder)
+# ./externals -> externals.2.100.0 (junction folder)
+# ./bin.2.100.0
+# ./externals.2.100.0
+# ./bin.2.99.0
+# ./externals.2.99.0
+# by using the junction folder we can avoid the file-in-use problem.
+
+# if the bin/externals junction point already exists, we just need to delete the junction point then re-create it to point to the new bin/externals folder.
+# if the bin/externals still are real folders, we need to rename the existing folder to bin.version format then create junction point to new bin/externals folder.
+
+# check bin folder
+if [[ -L "$rootfolder/bin" && -d "$rootfolder/bin" ]]
+then
+ # return code 0 means it find a bin folder that is a junction folder
+ # we just need to delete the junction point.
+ date "+[%F %T-%4N] Delete existing junction bin folder" >> "$logfile"
+ rm "$rootfolder/bin" >> "$logfile"
+ if [ $? -ne 0 ]
+ then
+ date "+[%F %T-%4N] Can't delete existing junction bin folder" >> "$logfile"
+ mv -fv "$logfile" "$logfile.failed"
+ exit 1
+ fi
+else
+ # otherwise, we need to move the current bin folder to bin.2.99.0 folder.
+ date "+[%F %T-%4N] move $rootfolder/bin $rootfolder/bin.$existrunnerversion" >> "$logfile" 2>&1
+ mv -fv "$rootfolder/bin" "$rootfolder/bin.$existrunnerversion" >> "$logfile" 2>&1
+ if [ $? -ne 0 ]
+ then
+ date "+[%F %T-%4N] Can't move $rootfolder/bin to $rootfolder/bin.$existrunnerversion" >> "$logfile" 2>&1
+ mv -fv "$logfile" "$logfile.failed"
+ exit 1
+ fi
+fi
+
+# check externals folder
+if [[ -L "$rootfolder/externals" && -d "$rootfolder/externals" ]]
+then
+ # the externals folder is already a junction folder
+ # we just need to delete the junction point.
+ date "+[%F %T-%4N] Delete existing junction externals folder" >> "$logfile"
+ rm "$rootfolder/externals" >> "$logfile"
+ if [ $? -ne 0 ]
+ then
+ date "+[%F %T-%4N] Can't delete existing junction externals folder" >> "$logfile"
+ mv -fv "$logfile" "$logfile.failed"
+ exit 1
+ fi
+else
+ # otherwise, we need to move the current externals folder to externals.2.99.0 folder.
+ date "+[%F %T-%4N] move $rootfolder/externals $rootfolder/externals.$existrunnerversion" >> "$logfile" 2>&1
+ mv -fv "$rootfolder/externals" "$rootfolder/externals.$existrunnerversion" >> "$logfile" 2>&1
+ if [ $? -ne 0 ]
+ then
+ date "+[%F %T-%4N] Can't move $rootfolder/externals to $rootfolder/externals.$existrunnerversion" >> "$logfile" 2>&1
+ mv -fv "$logfile" "$logfile.failed"
+ exit 1
+ fi
+fi
+
+# create junction bin folder
+date "+[%F %T-%4N] Create junction bin folder" >> "$logfile" 2>&1
+ln -s "$rootfolder/bin.$downloadrunnerversion" "$rootfolder/bin" >> "$logfile" 2>&1
+if [ $? -ne 0 ]
+then
+ date "+[%F %T-%4N] Can't create junction bin folder" >> "$logfile" 2>&1
+ mv -fv "$logfile" "$logfile.failed"
+ exit 1
+fi
+
+# create junction externals folder
+date "+[%F %T-%4N] Create junction externals folder" >> "$logfile" 2>&1
+ln -s "$rootfolder/externals.$downloadrunnerversion" "$rootfolder/externals" >> "$logfile" 2>&1
+if [ $? -ne 0 ]
+then
+ date "+[%F %T-%4N] Can't create junction externals folder" >> "$logfile" 2>&1
+ mv -fv "$logfile" "$logfile.failed"
+ exit 1
+fi
+
+date "+[%F %T-%4N] Update succeed" >> "$logfile"
+
+# rename the update log file with %logfile%.succeed/.failed/succeedneedrestart
+# the runner service host can determine the result of the runner update based on the log file name
+date "+[%F %T-%4N] Rename $logfile to be $logfile.succeed" >> "$logfile" 2>&1
+mv -fv "$logfile" "$logfile.succeed" >> "$logfile" 2>&1
+
+# restart interactive runner if needed
+if [ $restartinteractiverunner -ne 0 ]
+then
+ date "+[%F %T-%4N] Restarting interactive runner" >> "$logfile.succeed" 2>&1
+ "$rootfolder/run.sh" &
+fi
diff --git a/src/Misc/layoutroot/config.cmd b/src/Misc/layoutroot/config.cmd
new file mode 100644
index 00000000000..31c62ff3e6b
--- /dev/null
+++ b/src/Misc/layoutroot/config.cmd
@@ -0,0 +1,26 @@
+@echo off
+
+rem ********************************************************************************
+rem Unblock specific files.
+rem ********************************************************************************
+setlocal
+if defined VERBOSE_ARG (
+ set VERBOSE_ARG='Continue'
+) else (
+ set VERBOSE_ARG='SilentlyContinue'
+)
+
+rem Unblock files in the root of the layout folder. E.g. .cmd files.
+powershell.exe -NoLogo -Sta -NoProfile -NonInteractive -ExecutionPolicy Unrestricted -Command "$VerbosePreference = %VERBOSE_ARG% ; Get-ChildItem -LiteralPath '%~dp0' | ForEach-Object { Write-Verbose ('Unblock: {0}' -f $_.FullName) ; $_ } | Unblock-File | Out-Null"
+
+if /i "%~1" equ "remove" (
+ rem ********************************************************************************
+ rem Unconfigure the runner.
+ rem ********************************************************************************
+ "%~dp0bin\Runner.Listener.exe" %*
+) else (
+ rem ********************************************************************************
+ rem Configure the runner.
+ rem ********************************************************************************
+ "%~dp0bin\Runner.Listener.exe" configure %*
+)
diff --git a/src/Misc/layoutroot/config.sh b/src/Misc/layoutroot/config.sh
new file mode 100755
index 00000000000..20ec606f776
--- /dev/null
+++ b/src/Misc/layoutroot/config.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+user_id=`id -u`
+
+# we want to snapshot the environment of the config user
+if [ $user_id -eq 0 -a -z "$AGENT_ALLOW_RUNASROOT" ]; then
+ echo "Must not run with sudo"
+ exit 1
+fi
+
+# Check dotnet core 2.1 dependencies for Linux
+if [[ (`uname` == "Linux") ]]
+then
+ command -v ldd > /dev/null
+ if [ $? -ne 0 ]
+ then
+ echo "Can not find 'ldd'. Please install 'ldd' and try again."
+ exit 1
+ fi
+
+ ldd ./bin/libcoreclr.so | grep 'not found'
+ if [ $? -eq 0 ]; then
+ echo "Dependencies is missing for Dotnet Core 2.1"
+ echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 2.1 dependencies."
+ exit 1
+ fi
+
+ ldd ./bin/System.Security.Cryptography.Native.OpenSsl.so | grep 'not found'
+ if [ $? -eq 0 ]; then
+ echo "Dependencies is missing for Dotnet Core 2.1"
+ echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 2.1 dependencies."
+ exit 1
+ fi
+
+ ldd ./bin/System.IO.Compression.Native.so | grep 'not found'
+ if [ $? -eq 0 ]; then
+ echo "Dependencies is missing for Dotnet Core 2.1"
+ echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 2.1 dependencies."
+ exit 1
+ fi
+
+ ldd ./bin/System.Net.Http.Native.so | grep 'not found'
+ if [ $? -eq 0 ]; then
+ echo "Dependencies is missing for Dotnet Core 2.1"
+ echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 2.1 dependencies."
+ exit 1
+ fi
+
+ if ! [ -x "$(command -v ldconfig)" ]; then
+ LDCONFIG_COMMAND="/sbin/ldconfig"
+ if ! [ -x "$LDCONFIG_COMMAND" ]; then
+ echo "Can not find 'ldconfig' in PATH and '/sbin/ldconfig' doesn't exists either. Please install 'ldconfig' and try again."
+ exit 1
+ fi
+ else
+ LDCONFIG_COMMAND="ldconfig"
+ fi
+
+ libpath=${LD_LIBRARY_PATH:-}
+ # Fix: '${libpath//:/}' deleted the colons, gluing every LD_LIBRARY_PATH
+ # entry into one bogus path. Replace ':' with a space so ldconfig gets
+ # each directory as a separate argument.
+ $LDCONFIG_COMMAND -NXv ${libpath//:/ } 2>&1 | grep libicu >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ echo "Libicu's dependencies is missing for Dotnet Core 2.1"
+ echo "Execute ./bin/installdependencies.sh to install any missing Dotnet Core 2.1 dependencies."
+ exit 1
+ fi
+fi
+
+# Change directory to the script root directory
+# https://stackoverflow.com/questions/59895/getting-the-source-directory-of-a-bash-script-from-within
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
+ DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+ SOURCE="$(readlink "$SOURCE")"
+ [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
+done
+DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+cd $DIR
+
+source ./env.sh
+
+shopt -s nocasematch
+if [[ "$1" == "remove" ]]; then
+ ./bin/Runner.Listener "$@"
+else
+ ./bin/Runner.Listener configure "$@"
+fi
diff --git a/src/Misc/layoutroot/env.sh b/src/Misc/layoutroot/env.sh
new file mode 100755
index 00000000000..9aaf2bf5176
--- /dev/null
+++ b/src/Misc/layoutroot/env.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+varCheckList=(
+ 'LANG'
+ 'JAVA_HOME'
+ 'ANT_HOME'
+ 'M2_HOME'
+ 'ANDROID_HOME'
+ 'GRADLE_HOME'
+ 'NVM_BIN'
+ 'NVM_PATH'
+ 'VSTS_HTTP_PROXY'
+ 'VSTS_HTTP_PROXY_USERNAME'
+ 'VSTS_HTTP_PROXY_PASSWORD'
+ 'LD_LIBRARY_PATH'
+ 'PERL5LIB'
+ )
+
+envContents=""
+
+if [ -f ".env" ]; then
+ envContents=`cat .env`
+else
+ touch .env
+fi
+
+# Append "NAME=value" to .env for variable $1, but only if .env does not
+# already contain a "NAME=" entry and the variable is currently set.
+function writeVar()
+{
+ checkVar="$1"
+ checkDelim="${1}="
+ # ${envContents#*$checkDelim} strips through the first "NAME=" match;
+ # if the result equals the original, "NAME=" was not present in .env.
+ if test "${envContents#*$checkDelim}" = "$envContents"
+ then
+ # ${!checkVar} is indirect expansion: the value of the named variable.
+ if [ ! -z "${!checkVar}" ]; then
+ echo "${checkVar}=${!checkVar}">>.env
+ fi
+ fi
+}
+
+echo $PATH>.path
+
+for var_name in ${varCheckList[@]}
+do
+ writeVar "${var_name}"
+done
diff --git a/src/Misc/layoutroot/run.cmd b/src/Misc/layoutroot/run.cmd
new file mode 100644
index 00000000000..df5fd390975
--- /dev/null
+++ b/src/Misc/layoutroot/run.cmd
@@ -0,0 +1,33 @@
+@echo off
+
+rem ********************************************************************************
+rem Unblock specific files.
+rem ********************************************************************************
+setlocal
+if defined VERBOSE_ARG (
+ set VERBOSE_ARG='Continue'
+) else (
+ set VERBOSE_ARG='SilentlyContinue'
+)
+
+rem Unblock files in the root of the layout folder. E.g. .cmd files.
+powershell.exe -NoLogo -Sta -NoProfile -NonInteractive -ExecutionPolicy Unrestricted -Command "$VerbosePreference = %VERBOSE_ARG% ; Get-ChildItem -LiteralPath '%~dp0' | ForEach-Object { Write-Verbose ('Unblock: {0}' -f $_.FullName) ; $_ } | Unblock-File | Out-Null"
+
+if /i "%~1" equ "localRun" (
+ rem ********************************************************************************
+ rem Local run.
+ rem ********************************************************************************
+ "%~dp0bin\Runner.Listener.exe" %*
+) else (
+ rem ********************************************************************************
+ rem Run.
+ rem ********************************************************************************
+ "%~dp0bin\Runner.Listener.exe" run %*
+
+ rem Return code 4 means the run once runner received an update message.
+ rem Sleep 5 seconds to wait for the update process finish and run the runner again.
+ if ERRORLEVEL 4 (
+ timeout /t 5 /nobreak > NUL
+ "%~dp0bin\Runner.Listener.exe" run %*
+ )
+)
diff --git a/src/Misc/layoutroot/run.sh b/src/Misc/layoutroot/run.sh
new file mode 100755
index 00000000000..c874d8274b4
--- /dev/null
+++ b/src/Misc/layoutroot/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+# Validate not sudo
+user_id=`id -u`
+if [ $user_id -eq 0 -a -z "$AGENT_ALLOW_RUNASROOT" ]; then
+ echo "Must not run interactively with sudo"
+ exit 1
+fi
+
+# Change directory to the script root directory
+# https://stackoverflow.com/questions/59895/getting-the-source-directory-of-a-bash-script-from-within
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
+ DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+ SOURCE="$(readlink "$SOURCE")"
+ [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
+done
+DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+
+# Do not "cd $DIR". For localRun, the current directory is expected to be the repo location on disk.
+
+# Run
+shopt -s nocasematch
+if [[ "$1" == "localRun" ]]; then
+ # Fix: quote "$@" so arguments containing spaces survive ("$*"/$* split them).
+ "$DIR"/bin/Runner.Listener "$@"
+else
+ "$DIR"/bin/Runner.Listener run "$@"
+
+# Return code 4 means the run once agent received an update message.
+# Sleep 5 seconds to wait for the update process finish and run the agent again.
+ returnCode=$?
+ if [[ $returnCode == 4 ]]; then
+ if [ ! -x "$(command -v sleep)" ]; then
+ if [ ! -x "$(command -v ping)" ]; then
+ # last-resort busy wait when neither sleep nor ping is available
+ COUNT="0"
+ while [[ $COUNT != 5000 ]]; do
+ # Fix: '>nul' is Windows syntax and creates a file named "nul" on Linux.
+ echo "SLEEP" > /dev/null
+ COUNT=$((COUNT+1))
+ done
+ else
+ # Fix: Linux ping takes '-c <count>'; '-n' means numeric output and,
+ # without a count, the original pinged forever. '-c 5' waits ~5s.
+ ping -c 5 127.0.0.1 > /dev/null
+ fi
+ else
+ sleep 5 > /dev/null
+ fi
+
+ "$DIR"/bin/Runner.Listener run "$@"
+ else
+ exit $returnCode
+ fi
+fi
diff --git a/src/NuGet.Config b/src/NuGet.Config
new file mode 100644
index 00000000000..5816f1bb346
--- /dev/null
+++ b/src/NuGet.Config
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/src/Runner.Common/ActionCommand.cs b/src/Runner.Common/ActionCommand.cs
new file mode 100644
index 00000000000..eff88742934
--- /dev/null
+++ b/src/Runner.Common/ActionCommand.cs
@@ -0,0 +1,253 @@
+using GitHub.Runner.Common.Util;
+using GitHub.Runner.Sdk;
+using System;
+using System.Collections.Generic;
+
+namespace GitHub.Runner.Common
+{
+ public sealed class ActionCommand
+ {
+ private static readonly EscapeMapping[] _escapeMappings = new[]
+ {
+ new EscapeMapping(token: "%", replacement: "%25"),
+ new EscapeMapping(token: ";", replacement: "%3B"),
+ new EscapeMapping(token: "\r", replacement: "%0D"),
+ new EscapeMapping(token: "\n", replacement: "%0A"),
+ new EscapeMapping(token: "]", replacement: "%5D"),
+ };
+
+ private static readonly EscapeMapping[] _escapeDataMappings = new[]
+ {
+ new EscapeMapping(token: "\r", replacement: "%0D"),
+ new EscapeMapping(token: "\n", replacement: "%0A"),
+ };
+
+ private static readonly EscapeMapping[] _escapePropertyMappings = new[]
+ {
+ new EscapeMapping(token: "%", replacement: "%25"),
+ new EscapeMapping(token: "\r", replacement: "%0D"),
+ new EscapeMapping(token: "\n", replacement: "%0A"),
+ new EscapeMapping(token: ":", replacement: "%3A"),
+ new EscapeMapping(token: ",", replacement: "%2C"),
+ };
+
+ private readonly Dictionary<string, string> _properties = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
+ public const string Prefix = "##[";
+ public const string _commandKey = "::";
+
+ public ActionCommand(string command)
+ {
+ ArgUtil.NotNullOrEmpty(command, nameof(command));
+ Command = command;
+ }
+
+ public string Command { get; }
+
+
+ public Dictionary<string, string> Properties => _properties;
+
+ public string Data { get; set; }
+
+ public static bool TryParseV2(string message, HashSet<string> registeredCommands, out ActionCommand command)
+ {
+ command = null;
+ if (string.IsNullOrEmpty(message))
+ {
+ return false;
+ }
+
+ try
+ {
+ // the message needs to start with the keyword after trim leading space.
+ message = message.TrimStart();
+ if (!message.StartsWith(_commandKey))
+ {
+ return false;
+ }
+
+ // Get the index of the separator between the command info and the data.
+ int endIndex = message.IndexOf(_commandKey, _commandKey.Length);
+ if (endIndex < 0)
+ {
+ return false;
+ }
+
+ // Get the command info (command and properties).
+ int cmdIndex = _commandKey.Length;
+ string cmdInfo = message.Substring(cmdIndex, endIndex - cmdIndex);
+
+ // Get the command name
+ int spaceIndex = cmdInfo.IndexOf(' ');
+ string commandName =
+ spaceIndex < 0
+ ? cmdInfo
+ : cmdInfo.Substring(0, spaceIndex);
+
+ if (registeredCommands.Contains(commandName))
+ {
+ // Initialize the command.
+ command = new ActionCommand(commandName);
+ }
+ else
+ {
+ return false;
+ }
+
+ // Set the properties.
+ if (spaceIndex > 0)
+ {
+ string propertiesStr = cmdInfo.Substring(spaceIndex + 1).Trim();
+ string[] splitProperties = propertiesStr.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries);
+ foreach (string propertyStr in splitProperties)
+ {
+ string[] pair = propertyStr.Split(new[] { '=' }, count: 2, options: StringSplitOptions.RemoveEmptyEntries);
+ if (pair.Length == 2)
+ {
+ command.Properties[pair[0]] = UnescapeProperty(pair[1]);
+ }
+ }
+ }
+
+ command.Data = UnescapeData(message.Substring(endIndex + _commandKey.Length));
+ return true;
+ }
+ catch
+ {
+ command = null;
+ return false;
+ }
+ }
+
+ public static bool TryParse(string message, HashSet<string> registeredCommands, out ActionCommand command)
+ {
+ command = null;
+ if (string.IsNullOrEmpty(message))
+ {
+ return false;
+ }
+
+ try
+ {
+ // Get the index of the prefix.
+ int prefixIndex = message.IndexOf(Prefix);
+ if (prefixIndex < 0)
+ {
+ return false;
+ }
+
+ // Get the index of the separator between the command info and the data.
+ int rbIndex = message.IndexOf(']', prefixIndex);
+ if (rbIndex < 0)
+ {
+ return false;
+ }
+
+ // Get the command info (command and properties).
+ int cmdIndex = prefixIndex + Prefix.Length;
+ string cmdInfo = message.Substring(cmdIndex, rbIndex - cmdIndex);
+
+ // Get the command name
+ int spaceIndex = cmdInfo.IndexOf(' ');
+ string commandName =
+ spaceIndex < 0
+ ? cmdInfo
+ : cmdInfo.Substring(0, spaceIndex);
+
+ if (registeredCommands.Contains(commandName))
+ {
+ // Initialize the command.
+ command = new ActionCommand(commandName);
+ }
+ else
+ {
+ return false;
+ }
+
+ // Set the properties.
+ if (spaceIndex > 0)
+ {
+ string propertiesStr = cmdInfo.Substring(spaceIndex + 1);
+ string[] splitProperties = propertiesStr.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries);
+ foreach (string propertyStr in splitProperties)
+ {
+ string[] pair = propertyStr.Split(new[] { '=' }, count: 2, options: StringSplitOptions.RemoveEmptyEntries);
+ if (pair.Length == 2)
+ {
+ command.Properties[pair[0]] = Unescape(pair[1]);
+ }
+ }
+ }
+
+ command.Data = Unescape(message.Substring(rbIndex + 1));
+ return true;
+ }
+ catch
+ {
+ command = null;
+ return false;
+ }
+ }
+
+ private static string Unescape(string escaped)
+ {
+ if (string.IsNullOrEmpty(escaped))
+ {
+ return string.Empty;
+ }
+
+ string unescaped = escaped;
+ foreach (EscapeMapping mapping in _escapeMappings)
+ {
+ unescaped = unescaped.Replace(mapping.Replacement, mapping.Token);
+ }
+
+ return unescaped;
+ }
+
+ private static string UnescapeProperty(string escaped)
+ {
+ if (string.IsNullOrEmpty(escaped))
+ {
+ return string.Empty;
+ }
+
+ string unescaped = escaped;
+ foreach (EscapeMapping mapping in _escapePropertyMappings)
+ {
+ unescaped = unescaped.Replace(mapping.Replacement, mapping.Token);
+ }
+
+ return unescaped;
+ }
+
+ private static string UnescapeData(string escaped)
+ {
+ if (string.IsNullOrEmpty(escaped))
+ {
+ return string.Empty;
+ }
+
+ string unescaped = escaped;
+ foreach (EscapeMapping mapping in _escapeDataMappings)
+ {
+ unescaped = unescaped.Replace(mapping.Replacement, mapping.Token);
+ }
+
+ return unescaped;
+ }
+
+ private sealed class EscapeMapping
+ {
+ public string Replacement { get; }
+ public string Token { get; }
+
+ public EscapeMapping(string token, string replacement)
+ {
+ ArgUtil.NotNullOrEmpty(token, nameof(token));
+ ArgUtil.NotNullOrEmpty(replacement, nameof(replacement));
+ Token = token;
+ Replacement = replacement;
+ }
+ }
+ }
+}
diff --git a/src/Runner.Common/ActionResult.cs b/src/Runner.Common/ActionResult.cs
new file mode 100644
index 00000000000..db68b685249
--- /dev/null
+++ b/src/Runner.Common/ActionResult.cs
@@ -0,0 +1,15 @@
+using System;
+
+namespace GitHub.Runner.Common
+{
+ public enum ActionResult
+ {
+ Success = 0,
+
+ Failure = 1,
+
+ Cancelled = 2,
+
+ Skipped = 3
+ }
+}
\ No newline at end of file
diff --git a/src/Runner.Common/AsyncManualResetEvent.cs b/src/Runner.Common/AsyncManualResetEvent.cs
new file mode 100644
index 00000000000..42f5b784442
--- /dev/null
+++ b/src/Runner.Common/AsyncManualResetEvent.cs
@@ -0,0 +1,33 @@
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace GitHub.Runner.Common
+{
+ //Stephen Toub: http://blogs.msdn.com/b/pfxteam/archive/2012/02/11/10266920.aspx
+
+ public class AsyncManualResetEvent
+ {
+ private volatile TaskCompletionSource<bool> m_tcs = new TaskCompletionSource<bool>();
+
+ public Task WaitAsync() { return m_tcs.Task; }
+
+ public void Set()
+ {
+ var tcs = m_tcs;
+ Task.Factory.StartNew(s => ((TaskCompletionSource<bool>)s).TrySetResult(true),
+ tcs, CancellationToken.None, TaskCreationOptions.PreferFairness, TaskScheduler.Default);
+ tcs.Task.Wait();
+ }
+
+ public void Reset()
+ {
+ while (true)
+ {
+ var tcs = m_tcs;
+ if (!tcs.Task.IsCompleted ||
+ Interlocked.CompareExchange(ref m_tcs, new TaskCompletionSource<bool>(), tcs) == tcs)
+ return;
+ }
+ }
+ }
+}
diff --git a/src/Runner.Common/Capabilities/CapabilitiesManager.cs b/src/Runner.Common/Capabilities/CapabilitiesManager.cs
new file mode 100644
index 00000000000..76bc5fca99f
--- /dev/null
+++ b/src/Runner.Common/Capabilities/CapabilitiesManager.cs
@@ -0,0 +1,73 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using GitHub.Runner.Common.Util;
+using GitHub.Runner.Sdk;
+
+namespace GitHub.Runner.Common.Capabilities
+{
+ [ServiceLocator(Default = typeof(CapabilitiesManager))]
+ public interface ICapabilitiesManager : IRunnerService
+ {
+ Task<Dictionary<string, string>> GetCapabilitiesAsync(RunnerSettings settings, CancellationToken token);
+ }
+
+ public sealed class CapabilitiesManager : RunnerService, ICapabilitiesManager
+ {
+ public async Task<Dictionary<string, string>> GetCapabilitiesAsync(RunnerSettings settings, CancellationToken cancellationToken)
+ {
+ Trace.Entering();
+ ArgUtil.NotNull(settings, nameof(settings));
+
+ // Initialize a dictionary of capabilities.
+ var capabilities = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
+
+ if (settings.SkipCapabilitiesScan)
+ {
+ Trace.Info("Skip capabilities scan.");
+ return capabilities;
+ }
+
+ // Get the providers.
+ var extensionManager = HostContext.GetService<IExtensionManager>();
+ IEnumerable<ICapabilitiesProvider> providers =
+ extensionManager
+ .GetExtensions<ICapabilitiesProvider>()
+ ?.OrderBy(x => x.Order);
+
+ // Add each capability returned from each provider.
+ foreach (ICapabilitiesProvider provider in providers ?? new ICapabilitiesProvider[0])
+ {
+ foreach (Capability capability in await provider.GetCapabilitiesAsync(settings, cancellationToken) ?? new List<Capability>())
+ {
+ // Make sure we mask secrets in capabilities values.
+ capabilities[capability.Name] = HostContext.SecretMasker.MaskSecrets(capability.Value);
+ }
+ }
+
+ return capabilities;
+ }
+ }
+
+ public interface ICapabilitiesProvider : IExtension
+ {
+ int Order { get; }
+
+ Task<List<Capability>> GetCapabilitiesAsync(RunnerSettings settings, CancellationToken cancellationToken);
+ }
+
+ public sealed class Capability
+ {
+ public string Name { get; }
+ public string Value { get; }
+
+ public Capability(string name, string value)
+ {
+ ArgUtil.NotNullOrEmpty(name, nameof(name));
+ Name = name;
+ Value = value ?? string.Empty;
+ }
+ }
+}
diff --git a/src/Runner.Common/Capabilities/RunnerCapabilitiesProvider.cs b/src/Runner.Common/Capabilities/RunnerCapabilitiesProvider.cs
new file mode 100644
index 00000000000..6821e4de089
--- /dev/null
+++ b/src/Runner.Common/Capabilities/RunnerCapabilitiesProvider.cs
@@ -0,0 +1,86 @@
+using GitHub.Runner.Common.Util;
+using GitHub.Runner.Sdk;
+using Microsoft.Win32;
+using System;
+using System.Collections.Generic;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace GitHub.Runner.Common.Capabilities
+{
+ public sealed class RunnerCapabilitiesProvider : RunnerService, ICapabilitiesProvider
+ {
+ public Type ExtensionType => typeof(ICapabilitiesProvider);
+
+ public int Order => 99; // Process last to override prior.
+
+ public Task<List<Capability>> GetCapabilitiesAsync(RunnerSettings settings, CancellationToken cancellationToken)
+ {
+ ArgUtil.NotNull(settings, nameof(settings));
+ var capabilities = new List<Capability>();
+ Add(capabilities, "Runner.Name", settings.AgentName ?? string.Empty);
+ Add(capabilities, "Runner.OS", VarUtil.OS);
+ Add(capabilities, "Runner.OSArchitecture", VarUtil.OSArchitecture);
+#if OS_WINDOWS
+ Add(capabilities, "Runner.OSVersion", GetOSVersionString());
+#endif
+ Add(capabilities, "InteractiveSession", (HostContext.StartupType != StartupType.Service).ToString());
+ Add(capabilities, "Runner.Version", BuildConstants.RunnerPackage.Version);
+ Add(capabilities, "Runner.ComputerName", Environment.MachineName ?? string.Empty);
+ Add(capabilities, "Runner.HomeDirectory", HostContext.GetDirectory(WellKnownDirectory.Root));
+ return Task.FromResult(capabilities);
+ }
+
+ private void Add(List<Capability> capabilities, string name, string value)
+ {
+ Trace.Info($"Adding '{name}': '{value}'");
+ capabilities.Add(new Capability(name, value));
+ }
+
+ private object GetHklmValue(string keyName, string valueName)
+ {
+ keyName = $@"HKEY_LOCAL_MACHINE\{keyName}";
+ object value = Registry.GetValue(keyName, valueName, defaultValue: null);
+ if (object.ReferenceEquals(value, null))
+ {
+ Trace.Info($"Key name '{keyName}', value name '{valueName}' is null.");
+ return null;
+ }
+
+ Trace.Info($"Key name '{keyName}', value name '{valueName}': '{value}'");
+ return value;
+ }
+
+ private string GetOSVersionString()
+ {
+ // Do not use System.Environment.OSVersion.Version to resolve the OS version number.
+ // It leverages the GetVersionEx function which may report an incorrect version
+ // depending on the app's manifest. For details, see:
+ // https://msdn.microsoft.com/library/windows/desktop/ms724451(v=vs.85).aspx
+
+ // Attempt to retrieve the major/minor version from the new registry values added in
+ // in Windows 10.
+ //
+ // The registry value "CurrentVersion" is unreliable in Windows 10. It contains the
+ // value "6.3" instead of "10.0".
+ object major = GetHklmValue(@"SOFTWARE\Microsoft\Windows NT\CurrentVersion", "CurrentMajorVersionNumber");
+ object minor = GetHklmValue(@"SOFTWARE\Microsoft\Windows NT\CurrentVersion", "CurrentMinorVersionNumber");
+ string majorMinorString;
+ if (major != null && minor != null)
+ {
+ majorMinorString = StringUtil.Format("{0}.{1}", major, minor);
+ }
+ else
+ {
+ // Fallback to the registry value "CurrentVersion".
+ majorMinorString = GetHklmValue(@"SOFTWARE\Microsoft\Windows NT\CurrentVersion", "CurrentVersion") as string;
+ }
+
+ // Opted to use the registry value "CurrentBuildNumber" over "CurrentBuild". Based on brief
+ // internet investigation, the only difference appears to be that on Windows XP "CurrentBuild"
+ // was unreliable and "CurrentBuildNumber" was the correct choice.
+ string build = GetHklmValue(@"SOFTWARE\Microsoft\Windows NT\CurrentVersion", "CurrentBuildNumber") as string;
+ return StringUtil.Format("{0}.{1}", majorMinorString, build);
+ }
+ }
+}
diff --git a/src/Runner.Common/CommandLineParser.cs b/src/Runner.Common/CommandLineParser.cs
new file mode 100644
index 00000000000..bca3b461ab8
--- /dev/null
+++ b/src/Runner.Common/CommandLineParser.cs
@@ -0,0 +1,128 @@
+using GitHub.Runner.Common.Util;
+using System;
+using System.Collections.Generic;
+using GitHub.DistributedTask.Logging;
+using GitHub.Runner.Sdk;
+
+//
+// Pattern:
+// cmd1 cmd2 --arg1 arg1val --aflag --arg2 arg2val
+//
+
+namespace GitHub.Runner.Common
+{
+ public sealed class CommandLineParser
+ {
+ private ISecretMasker _secretMasker;
+ private Tracing _trace;
+
+ public List<string> Commands { get; }
+ public HashSet<string> Flags { get; }
+ public Dictionary<string, string> Args { get; }
+ public HashSet<string> SecretArgNames { get; }
+ private bool HasArgs { get; set; }
+
+ public CommandLineParser(IHostContext hostContext, string[] secretArgNames)
+ {
+ _secretMasker = hostContext.SecretMasker;
+ _trace = hostContext.GetTrace(nameof(CommandLineParser));
+
+ Commands = new List<string>();
+ Flags = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
+ Args = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
+ SecretArgNames = new HashSet<string>(secretArgNames ?? new string[0], StringComparer.OrdinalIgnoreCase);
+ }
+
+ public bool IsCommand(string name)
+ {
+ bool result = false;
+ if (Commands.Count > 0)
+ {
+ result = String.Equals(name, Commands[0], StringComparison.CurrentCultureIgnoreCase);
+ }
+
+ return result;
+ }
+
+ public void Parse(string[] args)
+ {
+ _trace.Info(nameof(Parse));
+ ArgUtil.NotNull(args, nameof(args));
+ _trace.Info("Parsing {0} args", args.Length);
+
+ string argScope = null;
+ foreach (string arg in args)
+ {
+ _trace.Info("parsing argument");
+
+ HasArgs = HasArgs || arg.StartsWith("--");
+ _trace.Info("HasArgs: {0}", HasArgs);
+
+ if (string.Equals(arg, "/?", StringComparison.Ordinal))
+ {
+ Flags.Add("help");
+ }
+ else if (!HasArgs)
+ {
+ _trace.Info("Adding Command: {0}", arg);
+ Commands.Add(arg.Trim());
+ }
+ else
+ {
+ // it's either an arg, an arg value or a flag
+ if (arg.StartsWith("--") && arg.Length > 2)
+ {
+ string argVal = arg.Substring(2);
+ _trace.Info("arg: {0}", argVal);
+
+ // this means two --args in a row which means previous was a flag
+ if (argScope != null)
+ {
+ _trace.Info("Adding flag: {0}", argScope);
+ Flags.Add(argScope.Trim());
+ }
+
+ argScope = argVal;
+ }
+ else if (!arg.StartsWith("-"))
+ {
+ // we found a value - check if we're in scope of an arg
+ if (argScope != null && !Args.ContainsKey(argScope = argScope.Trim()))
+ {
+ if (SecretArgNames.Contains(argScope))
+ {
+ _secretMasker.AddValue(arg);
+ }
+
+ _trace.Info("Adding option '{0}': '{1}'", argScope, arg);
+ // ignore duplicates - first wins - below will be val1
+ // --arg1 val1 --arg1 val1
+ Args.Add(argScope, arg);
+ argScope = null;
+ }
+ }
+ else
+ {
+ //
+ // ignoring the second value for an arg (val2 below)
+ // --arg val1 val2
+
+ // ignoring invalid things like empty - and --
+ // --arg val1 -- --flag
+ _trace.Info("Ignoring arg");
+ }
+ }
+ }
+
+ _trace.Verbose("done parsing arguments");
+
+ // handle last arg being a flag
+ if (argScope != null)
+ {
+ Flags.Add(argScope);
+ }
+
+ _trace.Verbose("Exiting parse");
+ }
+ }
+}
diff --git a/src/Runner.Common/ConfigurationStore.cs b/src/Runner.Common/ConfigurationStore.cs
new file mode 100644
index 00000000000..d63d0fdedb2
--- /dev/null
+++ b/src/Runner.Common/ConfigurationStore.cs
@@ -0,0 +1,252 @@
+using GitHub.Runner.Common.Util;
+using GitHub.Runner.Sdk;
+using System.IO;
+using System.Runtime.Serialization;
+using System.Text;
+using System.Threading;
+
+namespace GitHub.Runner.Common
+{
+ //
+ // Settings are persisted in this structure
+ //
+ [DataContract]
+ public sealed class RunnerSettings
+ {
+ [DataMember(EmitDefaultValue = false)]
+ public bool AcceptTeeEula { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public int AgentId { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public string AgentName { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public string NotificationPipeName { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public string NotificationSocketAddress { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public bool SkipCapabilitiesScan { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public bool SkipSessionRecover { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public int PoolId { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public string PoolName { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public string ServerUrl { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public string GitHubUrl { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public string WorkFolder { get; set; }
+
+ [DataMember(EmitDefaultValue = false)]
+ public string MonitorSocketAddress { get; set; }
+ }
+
+ [DataContract]
+ public sealed class RunnerRuntimeOptions
+ {
+#if OS_WINDOWS
+ [DataMember(EmitDefaultValue = false)]
+ public bool GitUseSecureChannel { get; set; }
+#endif
+ }
+
+ [ServiceLocator(Default = typeof(ConfigurationStore))]
+ public interface IConfigurationStore : IRunnerService
+ {
+ bool IsConfigured();
+ bool IsServiceConfigured();
+ bool HasCredentials();
+ CredentialData GetCredentials();
+ RunnerSettings GetSettings();
+ void SaveCredential(CredentialData credential);
+ void SaveSettings(RunnerSettings settings);
+ void DeleteCredential();
+ void DeleteSettings();
+ RunnerRuntimeOptions GetRunnerRuntimeOptions();
+ void SaveRunnerRuntimeOptions(RunnerRuntimeOptions options);
+ void DeleteRunnerRuntimeOptions();
+ }
+
+ public sealed class ConfigurationStore : RunnerService, IConfigurationStore
+ {
+ private string _binPath;
+ private string _configFilePath;
+ private string _credFilePath;
+ private string _serviceConfigFilePath;
+ private string _runtimeOptionsFilePath;
+
+ private CredentialData _creds;
+ private RunnerSettings _settings;
+ private RunnerRuntimeOptions _runtimeOptions;
+
+ public override void Initialize(IHostContext hostContext)
+ {
+ base.Initialize(hostContext);
+
+ var currentAssemblyLocation = System.Reflection.Assembly.GetEntryAssembly().Location;
+ Trace.Info("currentAssemblyLocation: {0}", currentAssemblyLocation);
+
+ _binPath = HostContext.GetDirectory(WellKnownDirectory.Bin);
+ Trace.Info("binPath: {0}", _binPath);
+
+ RootFolder = HostContext.GetDirectory(WellKnownDirectory.Root);
+ Trace.Info("RootFolder: {0}", RootFolder);
+
+ _configFilePath = hostContext.GetConfigFile(WellKnownConfigFile.Runner);
+ Trace.Info("ConfigFilePath: {0}", _configFilePath);
+
+ _credFilePath = hostContext.GetConfigFile(WellKnownConfigFile.Credentials);
+ Trace.Info("CredFilePath: {0}", _credFilePath);
+
+ _serviceConfigFilePath = hostContext.GetConfigFile(WellKnownConfigFile.Service);
+ Trace.Info("ServiceConfigFilePath: {0}", _serviceConfigFilePath);
+
+ _runtimeOptionsFilePath = hostContext.GetConfigFile(WellKnownConfigFile.Options);
+ Trace.Info("RuntimeOptionsFilePath: {0}", _runtimeOptionsFilePath);
+ }
+
+ public string RootFolder { get; private set; }
+
+ public bool HasCredentials()
+ {
+ ArgUtil.Equal(RunMode.Normal, HostContext.RunMode, nameof(HostContext.RunMode));
+ Trace.Info("HasCredentials()");
+ bool credsStored = (new FileInfo(_credFilePath)).Exists;
+ Trace.Info("stored {0}", credsStored);
+ return credsStored;
+ }
+
+ public bool IsConfigured()
+ {
+ Trace.Info("IsConfigured()");
+ bool configured = HostContext.RunMode == RunMode.Local || (new FileInfo(_configFilePath)).Exists;
+ Trace.Info("IsConfigured: {0}", configured);
+ return configured;
+ }
+
+ public bool IsServiceConfigured()
+ {
+ ArgUtil.Equal(RunMode.Normal, HostContext.RunMode, nameof(HostContext.RunMode));
+ Trace.Info("IsServiceConfigured()");
+ bool serviceConfigured = (new FileInfo(_serviceConfigFilePath)).Exists;
+ Trace.Info($"IsServiceConfigured: {serviceConfigured}");
+ return serviceConfigured;
+ }
+
+ public CredentialData GetCredentials()
+ {
+ ArgUtil.Equal(RunMode.Normal, HostContext.RunMode, nameof(HostContext.RunMode));
+ if (_creds == null)
+ {
+ _creds = IOUtil.LoadObject<CredentialData>(_credFilePath);
+ }
+
+ return _creds;
+ }
+
+ public RunnerSettings GetSettings()
+ {
+ if (_settings == null)
+ {
+ RunnerSettings configuredSettings = null;
+ if (File.Exists(_configFilePath))
+ {
+ string json = File.ReadAllText(_configFilePath, Encoding.UTF8);
+ Trace.Info($"Read setting file: {json.Length} chars");
+ configuredSettings = StringUtil.ConvertFromJson<RunnerSettings>(json);
+ }
+
+ ArgUtil.NotNull(configuredSettings, nameof(configuredSettings));
+ _settings = configuredSettings;
+ }
+
+ return _settings;
+ }
+
+ public void SaveCredential(CredentialData credential)
+ {
+ ArgUtil.Equal(RunMode.Normal, HostContext.RunMode, nameof(HostContext.RunMode));
+ Trace.Info("Saving {0} credential @ {1}", credential.Scheme, _credFilePath);
+ if (File.Exists(_credFilePath))
+ {
+ // Delete existing credential file first, since the file is hidden and not able to overwrite.
+ Trace.Info("Delete exist runner credential file.");
+ IOUtil.DeleteFile(_credFilePath);
+ }
+
+ IOUtil.SaveObject(credential, _credFilePath);
+ Trace.Info("Credentials Saved.");
+ File.SetAttributes(_credFilePath, File.GetAttributes(_credFilePath) | FileAttributes.Hidden);
+ }
+
+ public void SaveSettings(RunnerSettings settings)
+ {
+ ArgUtil.Equal(RunMode.Normal, HostContext.RunMode, nameof(HostContext.RunMode));
+ Trace.Info("Saving runner settings.");
+ if (File.Exists(_configFilePath))
+ {
+ // Delete existing runner settings file first, since the file is hidden and not able to overwrite.
+ Trace.Info("Delete exist runner settings file.");
+ IOUtil.DeleteFile(_configFilePath);
+ }
+
+ IOUtil.SaveObject(settings, _configFilePath);
+ Trace.Info("Settings Saved.");
+ File.SetAttributes(_configFilePath, File.GetAttributes(_configFilePath) | FileAttributes.Hidden);
+ }
+
+ public void DeleteCredential()
+ {
+ ArgUtil.Equal(RunMode.Normal, HostContext.RunMode, nameof(HostContext.RunMode));
+ IOUtil.Delete(_credFilePath, default(CancellationToken));
+ }
+
+ public void DeleteSettings()
+ {
+ ArgUtil.Equal(RunMode.Normal, HostContext.RunMode, nameof(HostContext.RunMode));
+ IOUtil.Delete(_configFilePath, default(CancellationToken));
+ }
+
+ public RunnerRuntimeOptions GetRunnerRuntimeOptions()
+ {
+ if (_runtimeOptions == null && File.Exists(_runtimeOptionsFilePath))
+ {
+ _runtimeOptions = IOUtil.LoadObject<RunnerRuntimeOptions>(_runtimeOptionsFilePath);
+ }
+
+ return _runtimeOptions;
+ }
+
+ public void SaveRunnerRuntimeOptions(RunnerRuntimeOptions options)
+ {
+ Trace.Info("Saving runtime options.");
+ if (File.Exists(_runtimeOptionsFilePath))
+ {
+ // Delete existing runtime options file first, since the file is hidden and not able to overwrite.
+ Trace.Info("Delete exist runtime options file.");
+ IOUtil.DeleteFile(_runtimeOptionsFilePath);
+ }
+
+ IOUtil.SaveObject(options, _runtimeOptionsFilePath);
+ Trace.Info("Options Saved.");
+ File.SetAttributes(_runtimeOptionsFilePath, File.GetAttributes(_runtimeOptionsFilePath) | FileAttributes.Hidden);
+ }
+
+ public void DeleteRunnerRuntimeOptions()
+ {
+ IOUtil.Delete(_runtimeOptionsFilePath, default(CancellationToken));
+ }
+ }
+}
diff --git a/src/Runner.Common/Constants.cs b/src/Runner.Common/Constants.cs
new file mode 100644
index 00000000000..9825b07053f
--- /dev/null
+++ b/src/Runner.Common/Constants.cs
@@ -0,0 +1,343 @@
+using System;
+
+namespace GitHub.Runner.Common
+{
+ public enum RunMode
+ {
+ Normal, // Keep "Normal" first (default value).
+ Local,
+ }
+
+ public enum WellKnownDirectory
+ {
+ Bin,
+ Diag,
+ Externals,
+ Root,
+ Actions,
+ Temp,
+ Tools,
+ Update,
+ Work,
+ }
+
+ public enum WellKnownConfigFile
+ {
+ Runner,
+ Credentials,
+ RSACredentials,
+ Service,
+ CredentialStore,
+ Certificates,
+ Proxy,
+ ProxyCredentials,
+ ProxyBypass,
+ Options,
+ }
+
+ public static class Constants
+ {
+ /// Path environment variable name.
+#if OS_WINDOWS
+ public static readonly string PathVariable = "Path";
+#else
+ public static readonly string PathVariable = "PATH";
+#endif
+
+ public static string ProcessTrackingId = "RUNNER_TRACKING_ID";
+ public static string PluginTracePrefix = "##[plugin.trace]";
+ public static readonly int RunnerDownloadRetryMaxAttempts = 3;
+
+ // This enum is embedded within the Constants class to make it easier to reference and avoid
+ // ambiguous type reference with System.Runtime.InteropServices.OSPlatform and System.Runtime.InteropServices.Architecture
+ public enum OSPlatform
+ {
+ OSX,
+ Linux,
+ Windows
+ }
+
+ public enum Architecture
+ {
+ X86,
+ X64,
+ Arm,
+ Arm64
+ }
+
+ public static class Runner
+ {
+#if OS_LINUX
+ public static readonly OSPlatform Platform = OSPlatform.Linux;
+#elif OS_OSX
+ public static readonly OSPlatform Platform = OSPlatform.OSX;
+#elif OS_WINDOWS
+ public static readonly OSPlatform Platform = OSPlatform.Windows;
+#endif
+
+#if X86
+ public static readonly Architecture PlatformArchitecture = Architecture.X86;
+#elif X64
+ public static readonly Architecture PlatformArchitecture = Architecture.X64;
+#elif ARM
+ public static readonly Architecture PlatformArchitecture = Architecture.Arm;
+#elif ARM64
+ public static readonly Architecture PlatformArchitecture = Architecture.Arm64;
+#endif
+
+ public static readonly TimeSpan ExitOnUnloadTimeout = TimeSpan.FromSeconds(30);
+
+ public static class CommandLine
+ {
+ //if you are adding a new arg, please make sure you update the
+ //validArgs array as well present in the CommandSettings.cs
+ public static class Args
+ {
+ public static readonly string Agent = "agent";
+ public static readonly string Auth = "auth";
+ public static readonly string CollectionName = "collectionname";
+ public static readonly string DeploymentGroupName = "deploymentgroupname";
+ public static readonly string DeploymentPoolName = "deploymentpoolname";
+ public static readonly string DeploymentGroupTags = "deploymentgrouptags";
+ public static readonly string MachineGroupName = "machinegroupname";
+ public static readonly string MachineGroupTags = "machinegrouptags";
+ public static readonly string Matrix = "matrix";
+ public static readonly string MonitorSocketAddress = "monitorsocketaddress";
+ public static readonly string NotificationPipeName = "notificationpipename";
+ public static readonly string NotificationSocketAddress = "notificationsocketaddress";
+ public static readonly string Pool = "pool";
+ public static readonly string ProjectName = "projectname";
+ public static readonly string ProxyUrl = "proxyurl";
+ public static readonly string ProxyUserName = "proxyusername";
+ public static readonly string SslCACert = "sslcacert";
+ public static readonly string SslClientCert = "sslclientcert";
+ public static readonly string SslClientCertKey = "sslclientcertkey";
+ public static readonly string SslClientCertArchive = "sslclientcertarchive";
+ public static readonly string SslClientCertPassword = "sslclientcertpassword";
+ public static readonly string StartupType = "startuptype";
+ public static readonly string Url = "url";
+ public static readonly string UserName = "username";
+ public static readonly string WindowsLogonAccount = "windowslogonaccount";
+ public static readonly string Work = "work";
+ public static readonly string Yml = "yml";
+
+ // Secret args. Must be added to the "Secrets" getter as well.
+ public static readonly string Password = "password";
+ public static readonly string ProxyPassword = "proxypassword";
+ public static readonly string Token = "token";
+ public static readonly string WindowsLogonPassword = "windowslogonpassword";
+ public static string[] Secrets => new[]
+ {
+ Password,
+ ProxyPassword,
+ SslClientCertPassword,
+ Token,
+ WindowsLogonPassword,
+ };
+ }
+
+ public static class Commands
+ {
+ public static readonly string Configure = "configure";
+ public static readonly string LocalRun = "localRun";
+ public static readonly string Remove = "remove";
+ public static readonly string Run = "run";
+ public static readonly string Warmup = "warmup";
+ }
+
+ //if you are adding a new flag, please make sure you update the
+ //validFlags array as well present in the CommandSettings.cs
+ public static class Flags
+ {
+ public static readonly string AcceptTeeEula = "acceptteeeula";
+ public static readonly string AddDeploymentGroupTags = "adddeploymentgrouptags";
+ public static readonly string AddMachineGroupTags = "addmachinegrouptags";
+ public static readonly string Commit = "commit";
+ public static readonly string DeploymentGroup = "deploymentgroup";
+ public static readonly string DeploymentPool = "deploymentpool";
+ public static readonly string OverwriteAutoLogon = "overwriteautologon";
+ public static readonly string GitUseSChannel = "gituseschannel";
+ public static readonly string Help = "help";
+ public static readonly string MachineGroup = "machinegroup";
+ public static readonly string Replace = "replace";
+ public static readonly string NoRestart = "norestart";
+ public static readonly string LaunchBrowser = "launchbrowser";
+ public static readonly string Once = "once";
+ public static readonly string RunAsAutoLogon = "runasautologon";
+ public static readonly string RunAsService = "runasservice";
+ public static readonly string SslSkipCertValidation = "sslskipcertvalidation";
+ public static readonly string Unattended = "unattended";
+ public static readonly string Version = "version";
+ public static readonly string WhatIf = "whatif";
+ }
+ }
+
+ public static class ReturnCode
+ {
+ public const int Success = 0;
+ public const int TerminatedError = 1;
+ public const int RetryableError = 2;
+ public const int RunnerUpdating = 3;
+ public const int RunOnceRunnerUpdating = 4;
+ }
+ }
+
+ public static class Pipeline
+ {
+ public static class Path
+ {
+ public static readonly string PipelineMappingDirectory = "_PipelineMapping";
+ public static readonly string TrackingConfigFile = "PipelineFolder.json";
+ }
+ }
+
+ public static class Configuration
+ {
+ public static readonly string AAD = "AAD";
+ public static readonly string OAuthAccessToken = "OAuthAccessToken";
+ public static readonly string PAT = "PAT";
+ public static readonly string OAuth = "OAuth";
+ }
+
// Keywords recognized by the condition/expression evaluator.
// Note: both US ("canceled") and UK ("cancelled") spellings are defined.
public static class Expressions
{
    public static readonly string Always = "always";
    public static readonly string Canceled = "canceled";
    public static readonly string Cancelled = "cancelled";
    public static readonly string Failed = "failed";
    public static readonly string Failure = "failure";
    public static readonly string Success = "success";
    public static readonly string Succeeded = "succeeded";
    public static readonly string SucceededOrFailed = "succeededOrFailed";
    public static readonly string Variables = "variables";
}
+
// Well-known file and directory names under the runner root / work folder.
// Underscore-prefixed names are runner-managed directories.
public static class Path
{
    public static readonly string ActionsDirectory = "_actions";
    public static readonly string ActionManifestFile = "action.yml";
    public static readonly string BinDirectory = "bin";
    public static readonly string DiagDirectory = "_diag";
    public static readonly string ExternalsDirectory = "externals";
    public static readonly string RunnerDiagnosticLogPrefix = "Runner_";
    public static readonly string TempDirectory = "_temp";
    public static readonly string TeeDirectory = "tee";
    public static readonly string ToolDirectory = "_tool";
    public static readonly string TaskJsonFile = "task.json";
    public static readonly string UpdateDirectory = "_update";
    public static readonly string WorkDirectory = "_work";
    public static readonly string WorkerDiagnosticLogPrefix = "Worker_";
}
+
// Related to definition variables.
public static class Variables
{
    // Macro syntax used to reference a variable inside a value: $(name)
    public static readonly string MacroPrefix = "$(";
    public static readonly string MacroSuffix = ")";

    public static class Actions
    {
        //
        // Keep alphabetical
        //
        public static readonly string RunnerDebug = "ACTIONS_RUNNER_DEBUG";
        public static readonly string StepDebug = "ACTIONS_STEP_DEBUG";
    }

    public static class Agent
    {
        //
        // Keep alphabetical
        //
        // NOTE(review): not strictly alphabetical (GitUseSChannel follows Id) — harmless,
        // but worth fixing on a future touch.
        public static readonly string AcceptTeeEula = "agent.acceptteeeula";
        public static readonly string AllowAllEndpoints = "agent.allowAllEndpoints"; // remove after sprint 120 or so.
        public static readonly string AllowAllSecureFiles = "agent.allowAllSecureFiles"; // remove after sprint 121 or so.
        public static readonly string BuildDirectory = "agent.builddirectory";
        public static readonly string ContainerId = "agent.containerid";
        public static readonly string ContainerNetwork = "agent.containernetwork";
        public static readonly string HomeDirectory = "agent.homedirectory";
        public static readonly string Id = "agent.id";
        public static readonly string GitUseSChannel = "agent.gituseschannel";
        public static readonly string JobName = "agent.jobname";
        public static readonly string MachineName = "agent.machinename";
        public static readonly string Name = "agent.name";
        public static readonly string OS = "agent.os";
        public static readonly string OSArchitecture = "agent.osarchitecture";
        public static readonly string OSVersion = "agent.osversion";
        public static readonly string ProxyUrl = "agent.proxyurl";
        public static readonly string ProxyUsername = "agent.proxyusername";
        public static readonly string ProxyPassword = "agent.proxypassword";
        public static readonly string ProxyBypassList = "agent.proxybypasslist";
        public static readonly string RetainDefaultEncoding = "agent.retainDefaultEncoding";
        public static readonly string RootDirectory = "agent.RootDirectory";
        public static readonly string RunMode = "agent.runmode";
        public static readonly string ServerOMDirectory = "agent.ServerOMDirectory";
        public static readonly string ServicePortPrefix = "agent.services";
        public static readonly string SslCAInfo = "agent.cainfo";
        public static readonly string SslClientCert = "agent.clientcert";
        public static readonly string SslClientCertKey = "agent.clientcertkey";
        public static readonly string SslClientCertArchive = "agent.clientcertarchive";
        public static readonly string SslClientCertPassword = "agent.clientcertpassword";
        public static readonly string SslSkipCertValidation = "agent.skipcertvalidation";
        public static readonly string TempDirectory = "agent.TempDirectory";
        public static readonly string ToolsDirectory = "agent.ToolsDirectory";
        public static readonly string Version = "agent.version";
        public static readonly string WorkFolder = "agent.workfolder";
        public static readonly string WorkingDirectory = "agent.WorkingDirectory";
    }

    public static class Build
    {
        //
        // Keep alphabetical
        //
        public static readonly string ArtifactStagingDirectory = "build.artifactstagingdirectory";
        public static readonly string BinariesDirectory = "build.binariesdirectory";
        public static readonly string Number = "build.buildNumber";
        public static readonly string Clean = "build.clean";
        public static readonly string DefinitionName = "build.definitionname";
        public static readonly string GatedRunCI = "build.gated.runci";
        public static readonly string GatedShelvesetName = "build.gated.shelvesetname";
        public static readonly string RepoClean = "build.repository.clean";
        public static readonly string RepoGitSubmoduleCheckout = "build.repository.git.submodulecheckout";
        public static readonly string RepoId = "build.repository.id";
        public static readonly string RepoLocalPath = "build.repository.localpath";
        public static readonly string RepoName = "build.Repository.name";
        public static readonly string RepoProvider = "build.repository.provider";
        public static readonly string RepoTfvcWorkspace = "build.repository.tfvc.workspace";
        public static readonly string RepoUri = "build.repository.uri";
        public static readonly string SourceBranch = "build.sourcebranch";
        public static readonly string SourceTfvcShelveset = "build.sourcetfvcshelveset";
        public static readonly string SourceVersion = "build.sourceversion";
        public static readonly string SourcesDirectory = "build.sourcesdirectory";
        public static readonly string StagingDirectory = "build.stagingdirectory";
        public static readonly string SyncSources = "build.syncSources";
    }


    public static class System
    {
        //
        // Keep alphabetical
        //
        public static readonly string AccessToken = "system.accessToken";
        public static readonly string ArtifactsDirectory = "system.artifactsdirectory";
        public static readonly string CollectionId = "system.collectionid";
        public static readonly string Culture = "system.culture";
        public static readonly string DefaultWorkingDirectory = "system.defaultworkingdirectory";
        public static readonly string DefinitionId = "system.definitionid";
        public static readonly string EnableAccessToken = "system.enableAccessToken";
        public static readonly string HostType = "system.hosttype";
        public static readonly string PhaseDisplayName = "system.phaseDisplayName";
        public static readonly string PreferGitFromPath = "system.prefergitfrompath";
        public static readonly string PullRequestTargetBranchName = "system.pullrequest.targetbranch";
        public static readonly string SelfManageGitCreds = "system.selfmanagegitcreds";
        public static readonly string ServerType = "system.servertype";
        public static readonly string TFServerUrl = "system.TeamFoundationServerUri"; // back compat variable, do not document
        public static readonly string TeamProject = "system.teamproject";
        public static readonly string TeamProjectId = "system.teamProjectId";
        public static readonly string WorkFolder = "system.workfolder";
    }
}
+ }
+}
diff --git a/src/Runner.Common/CredentialData.cs b/src/Runner.Common/CredentialData.cs
new file mode 100644
index 00000000000..86e93786a6e
--- /dev/null
+++ b/src/Runner.Common/CredentialData.cs
@@ -0,0 +1,24 @@
+using System;
+using System.Collections.Generic;
+
+namespace GitHub.Runner.Common
+{
/// <summary>
/// Credential scheme plus a bag of scheme-specific values
/// (presumably one of the scheme names in Constants.Configuration — confirm at callers).
/// </summary>
public sealed class CredentialData
{
    /// <summary>Authentication scheme name.</summary>
    public string Scheme { get; set; }

    /// <summary>
    /// Scheme-specific key/value data. Lazily created on first access;
    /// keys are compared case-insensitively.
    /// </summary>
    public Dictionary<string, string> Data
    {
        get
        {
            if (_data == null)
            {
                _data = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
            }

            return _data;
        }
    }

    private Dictionary<string, string> _data;
}
+}
diff --git a/src/Runner.Common/Exceptions.cs b/src/Runner.Common/Exceptions.cs
new file mode 100644
index 00000000000..83c6edd3fa4
--- /dev/null
+++ b/src/Runner.Common/Exceptions.cs
@@ -0,0 +1,19 @@
+using System;
+
+namespace GitHub.Runner.Common
+{
/// <summary>
/// Exception type whose name signals to callers that the failed operation
/// should not be retried. Carries no state beyond the base Exception.
/// </summary>
public class NonRetryableException : Exception
{
    /// <summary>Creates the exception with no message.</summary>
    public NonRetryableException()
    {
    }

    /// <summary>Creates the exception with a descriptive message.</summary>
    public NonRetryableException(string message)
        : base(message)
    {
    }

    /// <summary>Creates the exception wrapping an underlying cause.</summary>
    public NonRetryableException(string message, Exception inner)
        : base(message, inner)
    {
    }
}
+}
diff --git a/src/Runner.Common/ExtensionManager.cs b/src/Runner.Common/ExtensionManager.cs
new file mode 100644
index 00000000000..dbbb060ae50
--- /dev/null
+++ b/src/Runner.Common/ExtensionManager.cs
@@ -0,0 +1,80 @@
+using GitHub.Runner.Common.Util;
+using GitHub.Runner.Sdk;
+using System;
+using System.Collections.Concurrent;
+using System.Collections.Generic;
+using System.Linq;
+
+namespace GitHub.Runner.Common
+{
/// <summary>
/// Loads and caches the built-in extension implementations for a given
/// extension interface. Default implementation: <see cref="ExtensionManager"/>.
/// </summary>
[ServiceLocator(Default = typeof(ExtensionManager))]
public interface IExtensionManager : IRunnerService
{
    // Restored the type parameter stripped by extraction: the method is generic
    // over the extension interface being requested.
    List<T> GetExtensions<T>() where T : class, IExtension;
}
+
/// <summary>
/// Creates and caches extension instances per extension-interface type.
/// Extensions are hard-coded by assembly-qualified name rather than discovered
/// from disk (see the comment on LoadExtensions).
/// </summary>
public sealed class ExtensionManager : RunnerService, IExtensionManager
{
    // Cache: extension interface type -> instantiated extensions (created once per type).
    private readonly ConcurrentDictionary<Type, List<IExtension>> _cache = new ConcurrentDictionary<Type, List<IExtension>>();

    /// <summary>
    /// Returns the (cached) extensions implementing interface T.
    /// Throws NotSupportedException for an unknown extension interface.
    /// </summary>
    public List<T> GetExtensions<T>() where T : class, IExtension
    {
        Trace.Info("Getting extensions for interface: '{0}'", typeof(T).FullName);
        List<IExtension> extensions = _cache.GetOrAdd(
            key: typeof(T),
            valueFactory: (Type key) =>
            {
                return LoadExtensions<T>();
            });
        return extensions.Select(x => x as T).ToList();
    }

    //
    // We will load extensions from assembly
    // once AssemblyLoadContext.Resolving event is able to
    // resolve dependency recursively
    //
    private List<IExtension> LoadExtensions<T>() where T : class, IExtension
    {
        var extensions = new List<IExtension>();
        switch (typeof(T).FullName)
        {
            // Listener capabilities providers.
            case "GitHub.Runner.Common.Capabilities.ICapabilitiesProvider":
                Add<T>(extensions, "GitHub.Runner.Common.Capabilities.RunnerCapabilitiesProvider, Runner.Common");
                break;
            // Action command extensions.
            case "GitHub.Runner.Worker.IActionCommandExtension":
                Add<T>(extensions, "GitHub.Runner.Worker.InternalPluginSetRepoPathCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.SetEnvCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.SetOutputCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.SaveStateCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.AddPathCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.AddMaskCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.AddMatcherCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.RemoveMatcherCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.WarningCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.ErrorCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.DebugCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.GroupCommandExtension, Runner.Worker");
                Add<T>(extensions, "GitHub.Runner.Worker.EndGroupCommandExtension, Runner.Worker");
                break;
            default:
                // This should never happen.
                throw new NotSupportedException($"Unexpected extension type: '{typeof(T).FullName}'");
        }

        return extensions;
    }

    // Instantiates one extension by assembly-qualified name, initializes it with
    // the host context, and appends it to the list. Throws if the type cannot be
    // loaded or does not implement T.
    private void Add<T>(List<IExtension> extensions, string assemblyQualifiedName) where T : class, IExtension
    {
        Trace.Info($"Creating instance: {assemblyQualifiedName}");
        Type type = Type.GetType(assemblyQualifiedName, throwOnError: true);
        var extension = Activator.CreateInstance(type) as T;
        ArgUtil.NotNull(extension, nameof(extension));
        extension.Initialize(HostContext);
        extensions.Add(extension);
    }
}
+}
diff --git a/src/Runner.Common/Extensions.cs b/src/Runner.Common/Extensions.cs
new file mode 100644
index 00000000000..fce2e6c16e8
--- /dev/null
+++ b/src/Runner.Common/Extensions.cs
@@ -0,0 +1,30 @@
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace GitHub.Runner.Common
+{
// Technique from: http://blogs.msdn.com/b/pfxteam/archive/2012/10/05/how-do-i-cancel-non-cancelable-async-operations.aspx
public static class Extensions
{
    /// <summary>
    /// Awaits <paramref name="task"/> but throws <see cref="OperationCanceledException"/>
    /// as soon as <paramref name="cancellationToken"/> is signaled. The underlying task
    /// is NOT cancelled — it keeps running unobserved.
    /// </summary>
    public static async Task<T> WithCancellation<T>(this Task<T> task, CancellationToken cancellationToken)
    {
        // RunContinuationsAsynchronously keeps the WhenAny continuation off the
        // thread that signals the token.
        var tcs = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
        using (cancellationToken.Register(
            s => ((TaskCompletionSource<bool>)s).TrySetResult(true), tcs))
        {
            if (task != await Task.WhenAny(task, tcs.Task))
            {
                throw new OperationCanceledException(cancellationToken);
            }
        }

        // Task already completed here; this propagates its result or exception.
        return await task;
    }

    /// <summary>Non-generic overload of <see cref="WithCancellation{T}"/>.</summary>
    public static async Task WithCancellation(this Task task, CancellationToken cancellationToken)
    {
        var tcs = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
        using (cancellationToken.Register(
            s => ((TaskCompletionSource<bool>)s).TrySetResult(true), tcs))
        {
            if (task != await Task.WhenAny(task, tcs.Task))
            {
                throw new OperationCanceledException(cancellationToken);
            }
        }

        // Propagates the completed task's exception, if any.
        await task;
    }
}
+}
diff --git a/src/Runner.Common/HostContext.cs b/src/Runner.Common/HostContext.cs
new file mode 100644
index 00000000000..8cb8d010371
--- /dev/null
+++ b/src/Runner.Common/HostContext.cs
@@ -0,0 +1,597 @@
+using GitHub.Runner.Common.Util;
+using System;
+using System.Collections.Concurrent;
+using System.Collections.Generic;
+using System.Globalization;
+using System.IO;
+using System.Linq;
+using System.Reflection;
+using System.Runtime.Loader;
+using System.Threading;
+using System.Threading.Tasks;
+using System.Diagnostics;
+using System.Net.Http;
+using System.Diagnostics.Tracing;
+using GitHub.DistributedTask.Logging;
+using System.Net.Http.Headers;
+using GitHub.Runner.Sdk;
+
+namespace GitHub.Runner.Common
+{
/// <summary>
/// Root of the runner's service-locator world: trace, secret masking,
/// well-known paths, per-type service resolution, and shutdown signaling.
/// </summary>
public interface IHostContext : IDisposable
{
    RunMode RunMode { get; set; }
    StartupType StartupType { get; set; }
    // Signaled when ShutdownRunner is called.
    CancellationToken RunnerShutdownToken { get; }
    ShutdownReason RunnerShutdownReason { get; }
    ISecretMasker SecretMasker { get; }
    ProductInfoHeaderValue UserAgent { get; }
    string GetDirectory(WellKnownDirectory directory);
    string GetConfigFile(WellKnownConfigFile configFile);
    Tracing GetTrace(string name);
    Task Delay(TimeSpan delay, CancellationToken cancellationToken);
    // Restored type parameters stripped by extraction: both resolve a service by
    // interface; CreateService always constructs, GetService caches a singleton.
    T CreateService<T>() where T : class, IRunnerService;
    T GetService<T>() where T : class, IRunnerService;
    void SetDefaultCulture(string name);
    event EventHandler Unloading;
    void ShutdownRunner(ShutdownReason reason);
    void WritePerfCounter(string counter);
}
+
// How the runner process was started (interactive, as a service, or at
// machine startup/logon — confirm exact mapping against the config writers).
public enum StartupType
{
    Manual,
    Service,
    AutoStartup
}
+
/// <summary>
/// Default IHostContext implementation. Also acts as an EventListener for the
/// "Microsoft-VSS-Http" event source and (optionally) as an observer of
/// HttpHandlerDiagnosticListener for raw HTTP tracing.
/// </summary>
public sealed class HostContext : EventListener, IObserver<DiagnosticListener>, IObserver<KeyValuePair<string, object>>, IHostContext, IDisposable
{
    private const int _defaultLogPageSize = 8; //MB
    private static int _defaultLogRetentionDays = 30;
    // Event ids whose first payload element is an int-encoded enum; translated
    // back to a readable name in OnEventWritten.
    private static int[] _vssHttpMethodEventIds = new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 24 };
    private static int[] _vssHttpCredentialEventIds = new int[] { 11, 13, 14, 15, 16, 17, 18, 20, 21, 22, 27, 29 };
    // Service-locator caches: resolved singletons and interface->concrete mappings.
    private readonly ConcurrentDictionary<Type, object> _serviceInstances = new ConcurrentDictionary<Type, object>();
    private readonly ConcurrentDictionary<Type, Type> _serviceTypes = new ConcurrentDictionary<Type, Type>();
    private readonly ISecretMasker _secretMasker = new SecretMasker();
    private readonly ProductInfoHeaderValue _userAgent = new ProductInfoHeaderValue($"GitHubActionsRunner-{BuildConstants.RunnerPackage.PackageName}", BuildConstants.RunnerPackage.Version);
    private CancellationTokenSource _runnerShutdownTokenSource = new CancellationTokenSource();
    private object _perfLock = new object();
    private RunMode _runMode = RunMode.Normal;
    private Tracing _trace;
    private Tracing _vssTrace;
    private Tracing _httpTrace;
    private ITraceManager _traceManager;
    private AssemblyLoadContext _loadContext;
    private IDisposable _httpTraceSubscription;
    private IDisposable _diagListenerSubscription;
    private StartupType _startupType;
    private string _perfFile;

    public event EventHandler Unloading;
    public CancellationToken RunnerShutdownToken => _runnerShutdownTokenSource.Token;
    public ShutdownReason RunnerShutdownReason { get; private set; }
    public ISecretMasker SecretMasker => _secretMasker;
    public ProductInfoHeaderValue UserAgent => _userAgent;

    /// <summary>
    /// Sets up secret masking, trace logging (paged file log or a single explicit
    /// log file), optional HTTP tracing, and optional perf-counter output.
    /// </summary>
    public HostContext(string hostType, string logFile = null)
    {
        // Validate args.
        ArgUtil.NotNullOrEmpty(hostType, nameof(hostType));

        _loadContext = AssemblyLoadContext.GetLoadContext(typeof(HostContext).GetTypeInfo().Assembly);
        _loadContext.Unloading += LoadContext_Unloading;

        // Register encoders so encoded forms of secrets are masked too.
        this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscape);
        this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscapeShift1);
        this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscapeShift2);
        this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscapeShift3);
        this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscapeShift4);
        this.SecretMasker.AddValueEncoder(ValueEncoders.Base64StringEscapeShift5);
        this.SecretMasker.AddValueEncoder(ValueEncoders.ExpressionStringEscape);
        this.SecretMasker.AddValueEncoder(ValueEncoders.JsonStringEscape);
        this.SecretMasker.AddValueEncoder(ValueEncoders.UriDataEscape);
        this.SecretMasker.AddValueEncoder(ValueEncoders.XmlDataEscape);

        // Create the trace manager.
        if (string.IsNullOrEmpty(logFile))
        {
            int logPageSize;
            string logSizeEnv = Environment.GetEnvironmentVariable($"{hostType.ToUpperInvariant()}_LOGSIZE");
            // BUGFIX: was "!string.IsNullOrEmpty(...) || !int.TryParse(...)", which
            // short-circuits whenever the env var IS set and therefore always used the
            // default. Fall back to the default only when missing or unparseable.
            if (string.IsNullOrEmpty(logSizeEnv) || !int.TryParse(logSizeEnv, out logPageSize))
            {
                logPageSize = _defaultLogPageSize;
            }

            int logRetentionDays;
            string logRetentionDaysEnv = Environment.GetEnvironmentVariable($"{hostType.ToUpperInvariant()}_LOGRETENTION");
            // BUGFIX: same inverted condition as LOGSIZE above.
            if (string.IsNullOrEmpty(logRetentionDaysEnv) || !int.TryParse(logRetentionDaysEnv, out logRetentionDays))
            {
                logRetentionDays = _defaultLogRetentionDays;
            }

            // this should give us _diag folder under runner root directory
            string diagLogDirectory = Path.Combine(new DirectoryInfo(Path.GetDirectoryName(Assembly.GetEntryAssembly().Location)).Parent.FullName, Constants.Path.DiagDirectory);
            _traceManager = new TraceManager(new HostTraceListener(diagLogDirectory, hostType, logPageSize, logRetentionDays), this.SecretMasker);
        }
        else
        {
            _traceManager = new TraceManager(new HostTraceListener(logFile), this.SecretMasker);
        }

        _trace = GetTrace(nameof(HostContext));
        _vssTrace = GetTrace("GitHubActionsRunner"); // VisualStudioService

        // Enable Http trace
        bool enableHttpTrace;
        if (bool.TryParse(Environment.GetEnvironmentVariable("GITHUB_ACTIONS_RUNNER_HTTPTRACE"), out enableHttpTrace) && enableHttpTrace)
        {
            _trace.Warning("*****************************************************************************************");
            _trace.Warning("**                                                                                     **");
            _trace.Warning("** Http trace is enabled, all your http traffic will be dumped into runner diag log.   **");
            _trace.Warning("** DO NOT share the log in public place! The trace may contains secrets in plain text. **");
            _trace.Warning("**                                                                                     **");
            _trace.Warning("*****************************************************************************************");

            _httpTrace = GetTrace("HttpTrace");
            _diagListenerSubscription = DiagnosticListener.AllListeners.Subscribe(this);
        }

        // Enable perf counter trace
        string perfCounterLocation = Environment.GetEnvironmentVariable("RUNNER_PERFLOG");
        if (!string.IsNullOrEmpty(perfCounterLocation))
        {
            try
            {
                Directory.CreateDirectory(perfCounterLocation);
                _perfFile = Path.Combine(perfCounterLocation, $"{hostType}.perf");
            }
            catch (Exception ex)
            {
                // Perf logging is best-effort; never fail startup for it.
                _trace.Error(ex);
            }
        }
    }

    public RunMode RunMode
    {
        get
        {
            return _runMode;
        }

        set
        {
            _trace.Info($"Set run mode: {value}");
            _runMode = value;
        }
    }

    /// <summary>Resolves a well-known directory to an absolute path (traced).</summary>
    public string GetDirectory(WellKnownDirectory directory)
    {
        string path;
        switch (directory)
        {
            case WellKnownDirectory.Bin:
                path = Path.GetDirectoryName(Assembly.GetEntryAssembly().Location);
                break;

            case WellKnownDirectory.Diag:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    Constants.Path.DiagDirectory);
                break;

            case WellKnownDirectory.Externals:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    Constants.Path.ExternalsDirectory);
                break;

            case WellKnownDirectory.Root:
                // Root is the parent of the bin directory.
                path = new DirectoryInfo(GetDirectory(WellKnownDirectory.Bin)).Parent.FullName;
                break;

            case WellKnownDirectory.Temp:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Work),
                    Constants.Path.TempDirectory);
                break;

            case WellKnownDirectory.Actions:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Work),
                    Constants.Path.ActionsDirectory);
                break;

            case WellKnownDirectory.Tools:
                // TODO: Coallesce to just check RUNNER_TOOL_CACHE when images stabilize
                path = Environment.GetEnvironmentVariable("RUNNER_TOOL_CACHE") ?? Environment.GetEnvironmentVariable("RUNNER_TOOLSDIRECTORY") ?? Environment.GetEnvironmentVariable("AGENT_TOOLSDIRECTORY") ?? Environment.GetEnvironmentVariable(Constants.Variables.Agent.ToolsDirectory);
                if (string.IsNullOrEmpty(path))
                {
                    path = Path.Combine(
                        GetDirectory(WellKnownDirectory.Work),
                        Constants.Path.ToolDirectory);
                }
                break;

            case WellKnownDirectory.Update:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Work),
                    Constants.Path.UpdateDirectory);
                break;

            case WellKnownDirectory.Work:
                // Work folder comes from the saved runner settings; requires a
                // configured runner.
                var configurationStore = GetService<IConfigurationStore>();
                RunnerSettings settings = configurationStore.GetSettings();
                ArgUtil.NotNull(settings, nameof(settings));
                ArgUtil.NotNullOrEmpty(settings.WorkFolder, nameof(settings.WorkFolder));
                path = Path.GetFullPath(Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    settings.WorkFolder));
                break;

            default:
                throw new NotSupportedException($"Unexpected well known directory: '{directory}'");
        }

        _trace.Info($"Well known directory '{directory}': '{path}'");
        return path;
    }

    /// <summary>Resolves a well-known config file to an absolute path under Root (traced).</summary>
    public string GetConfigFile(WellKnownConfigFile configFile)
    {
        string path;
        switch (configFile)
        {
            case WellKnownConfigFile.Runner:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".runner");
                break;

            case WellKnownConfigFile.Credentials:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".credentials");
                break;

            case WellKnownConfigFile.RSACredentials:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".credentials_rsaparams");
                break;

            case WellKnownConfigFile.Service:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".service");
                break;

            case WellKnownConfigFile.CredentialStore:
#if OS_OSX
                // macOS keeps the credential store in a keychain file.
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".credential_store.keychain");
#else
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".credential_store");
#endif
                break;

            case WellKnownConfigFile.Certificates:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".certificates");
                break;

            case WellKnownConfigFile.Proxy:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".proxy");
                break;

            case WellKnownConfigFile.ProxyCredentials:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".proxycredentials");
                break;

            case WellKnownConfigFile.ProxyBypass:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".proxybypass");
                break;

            case WellKnownConfigFile.Options:
                path = Path.Combine(
                    GetDirectory(WellKnownDirectory.Root),
                    ".options");
                break;
            default:
                throw new NotSupportedException($"Unexpected well known config file: '{configFile}'");
        }

        _trace.Info($"Well known config file '{configFile}': '{path}'");
        return path;
    }

    public Tracing GetTrace(string name)
    {
        return _traceManager[name];
    }

    public async Task Delay(TimeSpan delay, CancellationToken cancellationToken)
    {
        await Task.Delay(delay, cancellationToken);
    }

    /// <summary>
    /// Creates a new instance of T. The concrete type is read from T's
    /// ServiceLocatorAttribute and cached for subsequent calls.
    /// </summary>
    public T CreateService<T>() where T : class, IRunnerService
    {
        Type target;
        if (!_serviceTypes.TryGetValue(typeof(T), out target))
        {
            // Infer the concrete type from the ServiceLocatorAttribute.
            CustomAttributeData attribute = typeof(T)
                .GetTypeInfo()
                .CustomAttributes
                .FirstOrDefault(x => x.AttributeType == typeof(ServiceLocatorAttribute));
            if (attribute != null)
            {
                foreach (CustomAttributeNamedArgument arg in attribute.NamedArguments)
                {
                    if (string.Equals(arg.MemberName, ServiceLocatorAttribute.DefaultPropertyName, StringComparison.Ordinal))
                    {
                        target = arg.TypedValue.Value as Type;
                    }
                }
            }

            if (target == null)
            {
                throw new KeyNotFoundException(string.Format(CultureInfo.InvariantCulture, "Service mapping not found for key '{0}'.", typeof(T).FullName));
            }

            _serviceTypes.TryAdd(typeof(T), target);
            target = _serviceTypes[typeof(T)];
        }

        // Create a new instance.
        T svc = Activator.CreateInstance(target) as T;
        svc.Initialize(this);
        return svc;
    }

    /// <summary>
    /// Gets or creates the cached singleton instance of T.
    /// </summary>
    public T GetService<T>() where T : class, IRunnerService
    {
        // Return the cached instance if one already exists.
        object instance;
        if (_serviceInstances.TryGetValue(typeof(T), out instance))
        {
            return instance as T;
        }

        // Otherwise create a new instance and try to add it to the cache.
        _serviceInstances.TryAdd(typeof(T), CreateService<T>());

        // Return the instance from the cache.
        return _serviceInstances[typeof(T)] as T;
    }

    public void SetDefaultCulture(string name)
    {
        ArgUtil.NotNull(name, nameof(name));
        _trace.Verbose($"Setting default culture and UI culture to: '{name}'");
        CultureInfo.DefaultThreadCurrentCulture = new CultureInfo(name);
        CultureInfo.DefaultThreadCurrentUICulture = new CultureInfo(name);
    }

    /// <summary>Records the reason and signals RunnerShutdownToken.</summary>
    public void ShutdownRunner(ShutdownReason reason)
    {
        ArgUtil.NotNull(reason, nameof(reason));
        _trace.Info($"Runner will be shutdown for {reason.ToString()}");
        RunnerShutdownReason = reason;
        _runnerShutdownTokenSource.Cancel();
    }

    public override void Dispose()
    {
        Dispose(true);
        GC.SuppressFinalize(this);
    }

    public StartupType StartupType
    {
        get
        {
            return _startupType;
        }
        set
        {
            _startupType = value;
        }
    }

    /// <summary>
    /// Appends "counter:timestamp" to the perf file (no-op unless RUNNER_PERFLOG
    /// was set at construction). Best-effort: I/O errors are traced, not thrown.
    /// </summary>
    public void WritePerfCounter(string counter)
    {
        if (!string.IsNullOrEmpty(_perfFile))
        {
            // ':' is the field separator in the perf file format.
            string normalizedCounter = counter.Replace(':', '_');
            lock (_perfLock)
            {
                try
                {
                    File.AppendAllLines(_perfFile, new[] { $"{normalizedCounter}:{DateTime.UtcNow.ToString("O")}" });
                }
                catch (Exception ex)
                {
                    _trace.Error(ex);
                }
            }
        }
    }

    private void Dispose(bool disposing)
    {
        // TODO: Dispose the trace listener also.
        if (disposing)
        {
            if (_loadContext != null)
            {
                _loadContext.Unloading -= LoadContext_Unloading;
                _loadContext = null;
            }
            _httpTraceSubscription?.Dispose();
            _diagListenerSubscription?.Dispose();
            _traceManager?.Dispose();
            _traceManager = null;

            _runnerShutdownTokenSource?.Dispose();
            _runnerShutdownTokenSource = null;

            base.Dispose();
        }
    }

    private void LoadContext_Unloading(AssemblyLoadContext obj)
    {
        // Handlers receive null args by design (no payload to pass).
        Unloading?.Invoke(this, null);
    }

    void IObserver<DiagnosticListener>.OnCompleted()
    {
        _httpTrace.Info("DiagListeners finished transmitting data.");
    }

    void IObserver<DiagnosticListener>.OnError(Exception error)
    {
        _httpTrace.Error(error);
    }

    void IObserver<DiagnosticListener>.OnNext(DiagnosticListener listener)
    {
        // Only subscribe once, and only to the HTTP handler listener.
        if (listener.Name == "HttpHandlerDiagnosticListener" && _httpTraceSubscription == null)
        {
            _httpTraceSubscription = listener.Subscribe(this);
        }
    }

    void IObserver<KeyValuePair<string, object>>.OnCompleted()
    {
        _httpTrace.Info("HttpHandlerDiagnosticListener finished transmitting data.");
    }

    void IObserver<KeyValuePair<string, object>>.OnError(Exception error)
    {
        _httpTrace.Error(error);
    }

    void IObserver<KeyValuePair<string, object>>.OnNext(KeyValuePair<string, object> value)
    {
        _httpTrace.Info($"Trace {value.Key} event:{Environment.NewLine}{value.Value.ToString()}");
    }

    protected override void OnEventSourceCreated(EventSource source)
    {
        if (source.Name.Equals("Microsoft-VSS-Http"))
        {
            EnableEvents(source, EventLevel.Verbose);
        }
    }

    /// <summary>
    /// Routes VSS http events into the trace log, translating int-encoded enum
    /// payload values to readable names for known event ids.
    /// </summary>
    protected override void OnEventWritten(EventWrittenEventArgs eventData)
    {
        if (eventData == null)
        {
            return;
        }

        string message = eventData.Message;
        object[] payload = new object[0];
        if (eventData.Payload != null && eventData.Payload.Count > 0)
        {
            payload = eventData.Payload.ToArray();
        }

        try
        {
            if (_vssHttpMethodEventIds.Contains(eventData.EventId))
            {
                payload[0] = Enum.Parse(typeof(VssHttpMethod), ((int)payload[0]).ToString());
            }
            else if (_vssHttpCredentialEventIds.Contains(eventData.EventId))
            {
                payload[0] = Enum.Parse(typeof(GitHub.Services.Common.VssCredentialsType), ((int)payload[0]).ToString());
            }

            if (payload.Length > 0)
            {
                message = String.Format(eventData.Message.Replace("%n", Environment.NewLine), payload);
            }

            switch (eventData.Level)
            {
                case EventLevel.Critical:
                case EventLevel.Error:
                    _vssTrace.Error(message);
                    break;
                case EventLevel.Warning:
                    _vssTrace.Warning(message);
                    break;
                case EventLevel.Informational:
                    _vssTrace.Info(message);
                    break;
                default:
                    _vssTrace.Verbose(message);
                    break;
            }
        }
        catch (Exception ex)
        {
            // Never let a malformed event take the process down; log raw data instead.
            _vssTrace.Error(ex);
            _vssTrace.Info(eventData.Message);
            _vssTrace.Info(string.Join(", ", eventData.Payload?.ToArray() ?? new string[0]));
        }
    }

    // Copied from pipelines server code base, used for EventData translation.
    internal enum VssHttpMethod
    {
        UNKNOWN,
        DELETE,
        HEAD,
        GET,
        OPTIONS,
        PATCH,
        POST,
        PUT,
    }
}
+
public static class HostContextExtension
{
    /// <summary>
    /// Builds an HttpClientHandler wired to the runner's configured web proxy.
    /// </summary>
    public static HttpClientHandler CreateHttpClientHandler(this IHostContext context)
    {
        HttpClientHandler clientHandler = new HttpClientHandler();
        // NOTE(review): restoring the type argument stripped by extraction;
        // confirm the proxy service name (IRunnerWebProxy) against the project.
        var runnerWebProxy = context.GetService<IRunnerWebProxy>();
        clientHandler.Proxy = runnerWebProxy.WebProxy;
        return clientHandler;
    }
}
+
// Why the runner is shutting down; stored in IHostContext.RunnerShutdownReason
// when ShutdownRunner is called.
public enum ShutdownReason
{
    UserCancelled = 0,
    OperatingSystemShutdown = 1,
}
+}
diff --git a/src/Runner.Common/HostTraceListener.cs b/src/Runner.Common/HostTraceListener.cs
new file mode 100644
index 00000000000..e11edc5b492
--- /dev/null
+++ b/src/Runner.Common/HostTraceListener.cs
@@ -0,0 +1,202 @@
+using GitHub.Runner.Common.Util;
+using GitHub.Runner.Sdk;
+using System;
+using System.Diagnostics;
+using System.Globalization;
+using System.IO;
+using System.Text;
+
+namespace GitHub.Runner.Common
+{
+ public sealed class HostTraceListener : TextWriterTraceListener
+ {
+ private const string _logFileNamingPattern = "{0}_{1:yyyyMMdd-HHmmss}-utc.log";
+ private string _logFileDirectory;
+ private string _logFilePrefix;
+ private bool _enablePageLog = false;
+ private bool _enableLogRetention = false;
+ private int _currentPageSize;
+ private int _pageSizeLimit;
+ private int _retentionDays;
+
+ public HostTraceListener(string logFileDirectory, string logFilePrefix, int pageSizeLimit, int retentionDays)
+ : base()
+ {
+ ArgUtil.NotNullOrEmpty(logFileDirectory, nameof(logFileDirectory));
+ ArgUtil.NotNullOrEmpty(logFilePrefix, nameof(logFilePrefix));
+ _logFileDirectory = logFileDirectory;
+ _logFilePrefix = logFilePrefix;
+
+ Directory.CreateDirectory(_logFileDirectory);
+
+ if (pageSizeLimit > 0)
+ {
+ _enablePageLog = true;
+ _pageSizeLimit = pageSizeLimit * 1024 * 1024;
+ _currentPageSize = 0;
+ }
+
+ if (retentionDays > 0)
+ {
+ _enableLogRetention = true;
+ _retentionDays = retentionDays;
+ }
+
+ Writer = CreatePageLogWriter();
+ }
+
+ public HostTraceListener(string logFile)
+ : base()
+ {
+ ArgUtil.NotNullOrEmpty(logFile, nameof(logFile));
+ Directory.CreateDirectory(Path.GetDirectoryName(logFile));
+ Stream logStream = new FileStream(logFile, FileMode.Create, FileAccess.ReadWrite, FileShare.Read, bufferSize: 4096);
+ Writer = new StreamWriter(logStream);
+ }
+
+ // Copied and modified slightly from .Net Core source code. Modification was required to make it compile.
+ // There must be some TraceFilter extension class that is missing in this source code.
+ public override void TraceEvent(TraceEventCache eventCache, string source, TraceEventType eventType, int id, string message)
+ {
+ if (Filter != null && !Filter.ShouldTrace(eventCache, source, eventType, id, message, null, null, null))
+ {
+ return;
+ }
+
+ WriteHeader(source, eventType, id);
+ WriteLine(message);
+ WriteFooter(eventCache);
+ }
+
+ public override void WriteLine(string message)
+ {
+ base.WriteLine(message);
+ if (_enablePageLog)
+ {
+ int messageSize = UTF8Encoding.UTF8.GetByteCount(message);
+ _currentPageSize += messageSize;
+ if (_currentPageSize > _pageSizeLimit)
+ {
+ Flush();
+ if (Writer != null)
+ {
+ Writer.Dispose();
+ Writer = null;
+ }
+
+ Writer = CreatePageLogWriter();
+ _currentPageSize = 0;
+ }
+ }
+
+ Flush();
+ }
+
+ public override void Write(string message)
+ {
+ base.Write(message);
+ if (_enablePageLog)
+ {
+ int messageSize = UTF8Encoding.UTF8.GetByteCount(message);
+ _currentPageSize += messageSize;
+ }
+
+ Flush();
+ }
+
+ internal bool IsEnabled(TraceOptions opts)
+ {
+ return (opts & TraceOutputOptions) != 0;
+ }
+
+ // Altered from the original .Net Core implementation.
+ private void WriteHeader(string source, TraceEventType eventType, int id)
+ {
+ string type = null;
+ switch (eventType)
+ {
+ case TraceEventType.Critical:
+ type = "CRIT";
+ break;
+ case TraceEventType.Error:
+ type = "ERR ";
+ break;
+ case TraceEventType.Warning:
+ type = "WARN";
+ break;
+ case TraceEventType.Information:
+ type = "INFO";
+ break;
+ case TraceEventType.Verbose:
+ type = "VERB";
+ break;
+ default:
+ type = eventType.ToString();
+ break;
+ }
+
+ Write(StringUtil.Format("[{0:u} {1} {2}] ", DateTime.UtcNow, type, source));
+ }
+
+ // Copied and modified slightly from .Net Core source code to make it compile. The original code
+ // accesses a private indentLevel field. In this code it has been modified to use the getter/setter.
+ private void WriteFooter(TraceEventCache eventCache)
+ {
+ if (eventCache == null)
+ return;
+
+ IndentLevel++;
+ if (IsEnabled(TraceOptions.ProcessId))
+ WriteLine("ProcessId=" + eventCache.ProcessId);
+
+ if (IsEnabled(TraceOptions.ThreadId))
+ WriteLine("ThreadId=" + eventCache.ThreadId);
+
+ if (IsEnabled(TraceOptions.DateTime))
+ WriteLine("DateTime=" + eventCache.DateTime.ToString("o", CultureInfo.InvariantCulture));
+
+ if (IsEnabled(TraceOptions.Timestamp))
+ WriteLine("Timestamp=" + eventCache.Timestamp);
+
+ IndentLevel--;
+ }
+
+ private StreamWriter CreatePageLogWriter()
+ {
+ if (_enableLogRetention)
+ {
+ DirectoryInfo diags = new DirectoryInfo(_logFileDirectory);
+ var logs = diags.GetFiles($"{_logFilePrefix}*.log");
+ foreach (var log in logs)
+ {
+ if (log.LastWriteTimeUtc.AddDays(_retentionDays) < DateTime.UtcNow)
+ {
+ try
+ {
+ log.Delete();
+ }
+ catch (Exception)
+ {
+ // catch Exception and continue
+ // we shouldn't block logging and fail the runner if the runner can't delete an older log file.
+ }
+ }
+ }
+ }
+
+ string fileName = StringUtil.Format(_logFileNamingPattern, _logFilePrefix, DateTime.UtcNow);
+ string logFile = Path.Combine(_logFileDirectory, fileName);
+ Stream logStream;
+ if (File.Exists(logFile))
+ {
+ logStream = new FileStream(logFile, FileMode.Append, FileAccess.Write, FileShare.Read, bufferSize: 4096);
+ }
+ else
+ {
+ logStream = new FileStream(logFile, FileMode.Create, FileAccess.ReadWrite, FileShare.Read, bufferSize: 4096);
+ }
+
+ return new StreamWriter(logStream);
+ }
+ }
+}
diff --git a/src/Runner.Common/IExtension.cs b/src/Runner.Common/IExtension.cs
new file mode 100644
index 00000000000..23c5158a601
--- /dev/null
+++ b/src/Runner.Common/IExtension.cs
@@ -0,0 +1,9 @@
+using System;
+
+namespace GitHub.Runner.Common
+{
    // A runner service that can be discovered as an extension.
    // ExtensionType names the extension contract this service fulfills
    // (presumably used by an extension manager for lookup — confirm at call sites).
    public interface IExtension : IRunnerService
    {
        Type ExtensionType { get; }
    }
diff --git a/src/Runner.Common/JobNotification.cs b/src/Runner.Common/JobNotification.cs
new file mode 100644
index 00000000000..e7756aaa17d
--- /dev/null
+++ b/src/Runner.Common/JobNotification.cs
@@ -0,0 +1,296 @@
+using System;
+using System.IO;
+using System.IO.Pipes;
+using System.Net;
+using System.Net.Sockets;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace GitHub.Runner.Common
+{
    // Sends job lifecycle notifications to an external listener over a named
    // pipe or TCP socket, and optionally to a separate monitor socket.
    [ServiceLocator(Default = typeof(JobNotification))]
    public interface IJobNotification : IRunnerService, IDisposable
    {
        // Announces that the job started and kicks off the monitor.
        Task JobStarted(Guid jobId, string accessToken, Uri serverUrl);

        // Announces that the job finished and stops the monitor.
        Task JobCompleted(Guid jobId);

        // Connects the named-pipe transport (plus the optional monitor socket).
        void StartClient(string pipeName, string monitorSocketAddress, CancellationToken cancellationToken);

        // Connects the TCP socket transport given "ip:port" (plus the optional monitor socket).
        void StartClient(string socketAddress, string monitorSocketAddress);
    }
+
+ public sealed class JobNotification : RunnerService, IJobNotification
+ {
+ private NamedPipeClientStream _outClient;
+ private StreamWriter _writeStream;
+ private Socket _socket;
+ private Socket _monitorSocket;
+ private bool _configured = false;
+ private bool _useSockets = false;
+ private bool _isMonitorConfigured = false;
+
+ public async Task JobStarted(Guid jobId, string accessToken, Uri serverUrl)
+ {
+ Trace.Info("Entering JobStarted Notification");
+
+ StartMonitor(jobId, accessToken, serverUrl);
+
+ if (_configured)
+ {
+ String message = $"Starting job: {jobId.ToString()}";
+ if (_useSockets)
+ {
+ try
+ {
+ Trace.Info("Writing JobStarted to socket");
+ _socket.Send(Encoding.UTF8.GetBytes(message));
+ Trace.Info("Finished JobStarted writing to socket");
+ }
+ catch (SocketException e)
+ {
+ Trace.Error($"Failed sending message \"{message}\" on socket!");
+ Trace.Error(e);
+ }
+ }
+ else
+ {
+ Trace.Info("Writing JobStarted to pipe");
+ await _writeStream.WriteLineAsync(message);
+ await _writeStream.FlushAsync();
+ Trace.Info("Finished JobStarted writing to pipe");
+ }
+ }
+ }
+
+ public async Task JobCompleted(Guid jobId)
+ {
+ Trace.Info("Entering JobCompleted Notification");
+
+ await EndMonitor();
+
+ if (_configured)
+ {
+ String message = $"Finished job: {jobId.ToString()}";
+ if (_useSockets)
+ {
+ try
+ {
+ Trace.Info("Writing JobCompleted to socket");
+ _socket.Send(Encoding.UTF8.GetBytes(message));
+ Trace.Info("Finished JobCompleted writing to socket");
+ }
+ catch (SocketException e)
+ {
+ Trace.Error($"Failed sending message \"{message}\" on socket!");
+ Trace.Error(e);
+ }
+ }
+ else
+ {
+ Trace.Info("Writing JobCompleted to pipe");
+ await _writeStream.WriteLineAsync(message);
+ await _writeStream.FlushAsync();
+ Trace.Info("Finished JobCompleted writing to pipe");
+ }
+ }
+ }
+
+ public async void StartClient(string pipeName, string monitorSocketAddress, CancellationToken cancellationToken)
+ {
+ if (pipeName != null && !_configured)
+ {
+ Trace.Info("Connecting to named pipe {0}", pipeName);
+ _outClient = new NamedPipeClientStream(".", pipeName, PipeDirection.Out, PipeOptions.Asynchronous);
+ await _outClient.ConnectAsync(cancellationToken);
+ _writeStream = new StreamWriter(_outClient, Encoding.UTF8);
+ _configured = true;
+ Trace.Info("Connection successful to named pipe {0}", pipeName);
+ }
+
+ ConnectMonitor(monitorSocketAddress);
+ }
+
+ public void StartClient(string socketAddress, string monitorSocketAddress)
+ {
+ if (!_configured)
+ {
+ try
+ {
+ string[] splitAddress = socketAddress.Split(':');
+ if (splitAddress.Length != 2)
+ {
+ Trace.Error("Invalid socket address {0}. Job Notification will be disabled.", socketAddress);
+ return;
+ }
+
+ IPAddress address;
+ try
+ {
+ address = IPAddress.Parse(splitAddress[0]);
+ }
+ catch (FormatException e)
+ {
+ Trace.Error("Invalid socket ip address {0}. Job Notification will be disabled",splitAddress[0]);
+ Trace.Error(e);
+ return;
+ }
+
+ int port = -1;
+ Int32.TryParse(splitAddress[1], out port);
+ if (port < IPEndPoint.MinPort || port > IPEndPoint.MaxPort)
+ {
+ Trace.Error("Invalid tcp socket port {0}. Job Notification will be disabled.", splitAddress[1]);
+ return;
+ }
+
+ _socket = new Socket(SocketType.Stream, ProtocolType.Tcp);
+ _socket.Connect(address, port);
+ Trace.Info("Connection successful to socket {0}", socketAddress);
+ _useSockets = true;
+ _configured = true;
+ }
+ catch (SocketException e)
+ {
+ Trace.Error("Connection to socket {0} failed!", socketAddress);
+ Trace.Error(e);
+ }
+ }
+
+ ConnectMonitor(monitorSocketAddress);
+ }
+
+ private void StartMonitor(Guid jobId, string accessToken, Uri serverUri)
+ {
+ if(String.IsNullOrEmpty(accessToken))
+ {
+ Trace.Info("No access token could be retrieved to start the monitor.");
+ return;
+ }
+
+ try
+ {
+ Trace.Info("Entering StartMonitor");
+ if (_isMonitorConfigured)
+ {
+ String message = $"Start {jobId.ToString()} {accessToken} {serverUri.ToString()} {System.Diagnostics.Process.GetCurrentProcess().Id}";
+
+ Trace.Info("Writing StartMonitor to socket");
+ _monitorSocket.Send(Encoding.UTF8.GetBytes(message));
+ Trace.Info("Finished StartMonitor writing to socket");
+ }
+ }
+ catch (SocketException e)
+ {
+ Trace.Error($"Failed sending StartMonitor message on socket!");
+ Trace.Error(e);
+ }
+ catch (Exception e)
+ {
+ Trace.Error($"Unexpected error occurred while sending StartMonitor message on socket!");
+ Trace.Error(e);
+ }
+ }
+
+ private async Task EndMonitor()
+ {
+ try
+ {
+ Trace.Info("Entering EndMonitor");
+ if (_isMonitorConfigured)
+ {
+ String message = $"End {System.Diagnostics.Process.GetCurrentProcess().Id}";
+ Trace.Info("Writing EndMonitor to socket");
+ _monitorSocket.Send(Encoding.UTF8.GetBytes(message));
+ Trace.Info("Finished EndMonitor writing to socket");
+
+ await Task.Delay(TimeSpan.FromSeconds(2));
+ }
+ }
+ catch (SocketException e)
+ {
+ Trace.Error($"Failed sending end message on socket!");
+ Trace.Error(e);
+ }
+ catch (Exception e)
+ {
+ Trace.Error($"Unexpected error occurred while sending StartMonitor message on socket!");
+ Trace.Error(e);
+ }
+ }
+
+ private void ConnectMonitor(string monitorSocketAddress)
+ {
+ int port = -1;
+ if (!_isMonitorConfigured && !String.IsNullOrEmpty(monitorSocketAddress))
+ {
+ try
+ {
+ string[] splitAddress = monitorSocketAddress.Split(':');
+ if (splitAddress.Length != 2)
+ {
+ Trace.Error("Invalid socket address {0}. Unable to connect to monitor.", monitorSocketAddress);
+ return;
+ }
+
+ IPAddress address;
+ try
+ {
+ address = IPAddress.Parse(splitAddress[0]);
+ }
+ catch (FormatException e)
+ {
+ Trace.Error("Invalid socket IP address {0}. Unable to connect to monitor.", splitAddress[0]);
+ Trace.Error(e);
+ return;
+ }
+
+ Int32.TryParse(splitAddress[1], out port);
+ if (port < IPEndPoint.MinPort || port > IPEndPoint.MaxPort)
+ {
+ Trace.Error("Invalid TCP socket port {0}. Unable to connect to monitor.", splitAddress[1]);
+ return;
+ }
+
+
+ Trace.Verbose("Trying to connect to monitor at port {0}", port);
+ _monitorSocket = new Socket(SocketType.Stream, ProtocolType.Tcp);
+ _monitorSocket.Connect(address, port);
+ Trace.Info("Connection successful to local port {0}", port);
+ _isMonitorConfigured = true;
+ }
+ catch (Exception e)
+ {
+ Trace.Error("Connection to monitor port {0} failed!", port);
+ Trace.Error(e);
+ }
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ GC.SuppressFinalize(this);
+ }
+
+ private void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ _outClient?.Dispose();
+
+ if (_socket != null)
+ {
+ _socket.Send(Encoding.UTF8.GetBytes(""));
+ _socket.Shutdown(SocketShutdown.Both);
+ _socket = null;
+ }
+
+ if (_monitorSocket != null)
+ {
+ _monitorSocket.Send(Encoding.UTF8.GetBytes(""));
+ _monitorSocket.Shutdown(SocketShutdown.Both);
+ _monitorSocket = null;
+ }
+ }
+ }
+ }
+}
diff --git a/src/Runner.Common/JobServer.cs b/src/Runner.Common/JobServer.cs
new file mode 100644
index 00000000000..d7576199e81
--- /dev/null
+++ b/src/Runner.Common/JobServer.cs
@@ -0,0 +1,162 @@
+using GitHub.DistributedTask.WebApi;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Threading;
+using System.Threading.Tasks;
+using GitHub.Services.WebApi;
+
+namespace GitHub.Runner.Common
+{
    // Server API surface used by the worker to report job progress: timelines,
    // timeline records, logs, attachments, console feed, and plan events.
    // NOTE(review): generic type arguments in several signatures below appear to
    // have been lost in extraction (e.g. "IList lines", "Task>", and the type
    // parameter on RaisePlanEventAsync) — confirm against the original file.
    [ServiceLocator(Default = typeof(JobServer))]
    public interface IJobServer : IRunnerService
    {
        Task ConnectAsync(VssConnection jobConnection);

        // logging and console
        Task AppendLogContentAsync(Guid scopeIdentifier, string hubName, Guid planId, int logId, Stream uploadStream, CancellationToken cancellationToken);
        Task AppendTimelineRecordFeedAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, Guid stepId, IList lines, CancellationToken cancellationToken);
        Task CreateAttachmentAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, String type, String name, Stream uploadStream, CancellationToken cancellationToken);
        Task CreateLogAsync(Guid scopeIdentifier, string hubName, Guid planId, TaskLog log, CancellationToken cancellationToken);
        Task CreateTimelineAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, CancellationToken cancellationToken);
        Task> UpdateTimelineRecordsAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, IEnumerable records, CancellationToken cancellationToken);
        Task RaisePlanEventAsync(Guid scopeIdentifier, string hubName, Guid planId, T eventData, CancellationToken cancellationToken) where T : JobEvent;
        Task GetTimelineAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, CancellationToken cancellationToken);
    }
+
    // Thin wrapper around TaskHttpClient used to report job progress back to the
    // service. Every call short-circuits in RunMode.Local and otherwise requires
    // a prior successful ConnectAsync (enforced by CheckConnection).
    // NOTE(review): several generic type arguments below appear to have been lost
    // in extraction (e.g. "_connection.GetClient()", "Task.FromResult(null)",
    // "Task.FromResult>(null)") — confirm against the original file.
    public sealed class JobServer : RunnerService, IJobServer
    {
        private bool _hasConnection;          // set only after ConnectAsync succeeds
        private VssConnection _connection;
        private TaskHttpClient _taskClient;

        // Establishes the VSS connection, retrying up to 5 times with a 100ms
        // delay. The final attempt's failure is not swallowed by the exception
        // filter (attemptCount == 0) and so propagates to the caller.
        public async Task ConnectAsync(VssConnection jobConnection)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return;
            }

            _connection = jobConnection;
            int attemptCount = 5;
            while (!_connection.HasAuthenticated && attemptCount-- > 0)
            {
                try
                {
                    await _connection.ConnectAsync();
                    break;
                }
                catch (Exception ex) when (attemptCount > 0)
                {
                    // NOTE(review): "attemp" typo in the runtime log message below.
                    Trace.Info($"Catch exception during connect. {attemptCount} attemp left.");
                    Trace.Error(ex);
                }

                await Task.Delay(100);
            }

            _taskClient = _connection.GetClient();
            _hasConnection = true;
        }

        // Guards API calls that require ConnectAsync to have completed.
        private void CheckConnection()
        {
            if (!_hasConnection)
            {
                throw new InvalidOperationException("SetConnection");
            }
        }

        //-----------------------------------------------------------------
        // Feedback: WebConsole, TimelineRecords and Logs
        //-----------------------------------------------------------------

        // Appends stream content to an existing server-side log.
        public Task AppendLogContentAsync(Guid scopeIdentifier, string hubName, Guid planId, int logId, Stream uploadStream, CancellationToken cancellationToken)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return Task.FromResult(null);
            }

            CheckConnection();
            return _taskClient.AppendLogContentAsync(scopeIdentifier, hubName, planId, logId, uploadStream, cancellationToken: cancellationToken);
        }

        // Posts a batch of live console lines for a step's timeline record.
        public Task AppendTimelineRecordFeedAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, Guid stepId, IList lines, CancellationToken cancellationToken)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return Task.CompletedTask;
            }

            CheckConnection();
            return _taskClient.AppendTimelineRecordFeedAsync(scopeIdentifier, hubName, planId, timelineId, timelineRecordId, stepId, lines, cancellationToken: cancellationToken);
        }

        // Uploads an attachment for a timeline record.
        public Task CreateAttachmentAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, Guid timelineRecordId, string type, string name, Stream uploadStream, CancellationToken cancellationToken)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return Task.FromResult(null);
            }

            CheckConnection();
            return _taskClient.CreateAttachmentAsync(scopeIdentifier, hubName, planId, timelineId, timelineRecordId, type, name, uploadStream, cancellationToken: cancellationToken);
        }

        // Creates a new server-side log.
        public Task CreateLogAsync(Guid scopeIdentifier, string hubName, Guid planId, TaskLog log, CancellationToken cancellationToken)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return Task.FromResult(null);
            }

            CheckConnection();
            return _taskClient.CreateLogAsync(scopeIdentifier, hubName, planId, log, cancellationToken: cancellationToken);
        }

        // Creates a new timeline with the given id.
        public Task CreateTimelineAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, CancellationToken cancellationToken)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return Task.FromResult(null);
            }

            CheckConnection();
            return _taskClient.CreateTimelineAsync(scopeIdentifier, hubName, planId, new Timeline(timelineId), cancellationToken: cancellationToken);
        }

        // Pushes a batch of timeline record updates.
        public Task> UpdateTimelineRecordsAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, IEnumerable records, CancellationToken cancellationToken)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return Task.FromResult>(null);
            }

            CheckConnection();
            return _taskClient.UpdateTimelineRecordsAsync(scopeIdentifier, hubName, planId, timelineId, records, cancellationToken: cancellationToken);
        }

        // Raises a plan-level event (e.g. job completed).
        public Task RaisePlanEventAsync(Guid scopeIdentifier, string hubName, Guid planId, T eventData, CancellationToken cancellationToken) where T : JobEvent
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return Task.CompletedTask;
            }

            CheckConnection();
            return _taskClient.RaisePlanEventAsync(scopeIdentifier, hubName, planId, eventData, cancellationToken: cancellationToken);
        }

        // Fetches a timeline including its records.
        public Task GetTimelineAsync(Guid scopeIdentifier, string hubName, Guid planId, Guid timelineId, CancellationToken cancellationToken)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return Task.FromResult(null);
            }

            CheckConnection();
            return _taskClient.GetTimelineAsync(scopeIdentifier, hubName, planId, timelineId, includeRecords: true, cancellationToken: cancellationToken);
        }
    }
+}
diff --git a/src/Runner.Common/JobServerQueue.cs b/src/Runner.Common/JobServerQueue.cs
new file mode 100644
index 00000000000..5ffd7eac2e6
--- /dev/null
+++ b/src/Runner.Common/JobServerQueue.cs
@@ -0,0 +1,702 @@
+using GitHub.DistributedTask.WebApi;
+using GitHub.Runner.Common.Util;
+using System;
+using System.Collections.Generic;
+using System.Collections.Concurrent;
+using System.IO;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using Pipelines = GitHub.DistributedTask.Pipelines;
+using GitHub.Runner.Sdk;
+
+namespace GitHub.Runner.Common
+{
    // Batching queue that buffers console lines, file uploads, and timeline
    // record updates, and pushes them to the job server on background tasks.
    // NOTE(review): the EventHandler below is likely EventHandler<ThrottlingEventArgs>;
    // the generic argument may have been lost in extraction — confirm.
    [ServiceLocator(Default = typeof(JobServerQueue))]
    public interface IJobServerQueue : IRunnerService, IThrottlingReporter
    {
        // Raised when the server reports request throttling.
        event EventHandler JobServerQueueThrottling;
        // Drains all queues and stops the background tasks.
        Task ShutdownAsync();
        // Captures job identity and starts the background dequeue tasks.
        void Start(Pipelines.AgentJobRequestMessage jobRequest);
        void QueueWebConsoleLine(Guid stepRecordId, string line);
        void QueueFileUpload(Guid timelineId, Guid timelineRecordId, string type, string name, string path, bool deleteSource);
        void QueueTimelineRecordUpdate(Guid timelineId, TimelineRecord timelineRecord);
    }
+
+ public sealed class JobServerQueue : RunnerService, IJobServerQueue
+ {
+ // Default delay for Dequeue process
+ private static readonly TimeSpan _aggressiveDelayForWebConsoleLineDequeue = TimeSpan.FromMilliseconds(250);
+ private static readonly TimeSpan _delayForWebConsoleLineDequeue = TimeSpan.FromMilliseconds(500);
+ private static readonly TimeSpan _delayForTimelineUpdateDequeue = TimeSpan.FromMilliseconds(500);
+ private static readonly TimeSpan _delayForFileUploadDequeue = TimeSpan.FromMilliseconds(1000);
+
+ // Job message information
+ private Guid _scopeIdentifier;
+ private string _hubName;
+ private Guid _planId;
+ private Guid _jobTimelineId;
+ private Guid _jobTimelineRecordId;
+
+ // queue for web console line
+ private readonly ConcurrentQueue _webConsoleLineQueue = new ConcurrentQueue();
+
+ // queue for file upload (log file or attachment)
+ private readonly ConcurrentQueue _fileUploadQueue = new ConcurrentQueue();
+
+ // queue for timeline or timeline record update (one queue per timeline)
+ private readonly ConcurrentDictionary> _timelineUpdateQueue = new ConcurrentDictionary>();
+
+ // indicate how many timelines we have, we will process _timelineUpdateQueue base on the order of timeline in this list
+ private readonly List _allTimelines = new List();
+
        // buffered timeline records that failed to update
+ private readonly Dictionary> _bufferedRetryRecords = new Dictionary>();
+
+ // Task for each queue's dequeue process
+ private Task _webConsoleLineDequeueTask;
+ private Task _fileUploadDequeueTask;
+ private Task _timelineUpdateDequeueTask;
+
+ // common
+ private IJobServer _jobServer;
+ private Task[] _allDequeueTasks;
+ private readonly TaskCompletionSource _jobCompletionSource = new TaskCompletionSource();
+ private bool _queueInProcess = false;
+ private ITerminal _term;
+
+ public event EventHandler JobServerQueueThrottling;
+
+ // Web console dequeue will start with process queue every 250ms for the first 60*4 times (~60 seconds).
+ // Then the dequeue will happen every 500ms.
+ // In this way, customer still can get instance live console output on job start,
+ // at the same time we can cut the load to server after the build run for more than 60s
+ private int _webConsoleLineAggressiveDequeueCount = 0;
+ private const int _webConsoleLineAggressiveDequeueLimit = 4 * 60;
+ private bool _webConsoleLineAggressiveDequeue = true;
+ private bool _firstConsoleOutputs = true;
+
        // Resolves the job server dependency from the host context.
        public override void Initialize(IHostContext hostContext)
        {
            base.Initialize(hostContext);
            // NOTE(review): generic argument appears lost in extraction
            // (likely GetService<IJobServer>()) — confirm against the original file.
            _jobServer = hostContext.GetService();
        }
+
        // Captures job identity from the request message and spins up the three
        // background dequeue tasks (console lines, file uploads, timeline
        // updates). No-op while the queues are already running; in local run
        // mode only the terminal is resolved.
        public void Start(Pipelines.AgentJobRequestMessage jobRequest)
        {
            Trace.Entering();
            if (HostContext.RunMode == RunMode.Local)
            {
                // NOTE(review): generic argument appears lost in extraction
                // (likely GetService<ITerminal>()) — confirm against the original file.
                _term = HostContext.GetService();
                return;
            }

            if (_queueInProcess)
            {
                Trace.Info("No-opt, all queue process tasks are running.");
                return;
            }

            ArgUtil.NotNull(jobRequest, nameof(jobRequest));
            ArgUtil.NotNull(jobRequest.Plan, nameof(jobRequest.Plan));
            ArgUtil.NotNull(jobRequest.Timeline, nameof(jobRequest.Timeline));

            _scopeIdentifier = jobRequest.Plan.ScopeIdentifier;
            _hubName = jobRequest.Plan.PlanType;
            _planId = jobRequest.Plan.PlanId;
            _jobTimelineId = jobRequest.Timeline.Id;
            _jobTimelineRecordId = jobRequest.JobId;

            // Server already created the job timeline.
            // NOTE(review): generic argument appears lost in extraction
            // (likely new ConcurrentQueue<TimelineRecord>()) — confirm.
            _timelineUpdateQueue[_jobTimelineId] = new ConcurrentQueue();
            _allTimelines.Add(_jobTimelineId);

            // Start the three dequeue tasks.
            Trace.Info("Start process web console line queue.");
            _webConsoleLineDequeueTask = ProcessWebConsoleLinesQueueAsync();

            Trace.Info("Start process file upload queue.");
            _fileUploadDequeueTask = ProcessFilesUploadQueueAsync();

            Trace.Info("Start process timeline update queue.");
            _timelineUpdateDequeueTask = ProcessTimelinesUpdateQueueAsync();

            _allDequeueTasks = new Task[] { _webConsoleLineDequeueTask, _fileUploadDequeueTask, _timelineUpdateDequeueTask };
            _queueInProcess = true;
        }
+
        // WebConsoleLine queue and FileUpload queue are always best effort.
        // TimelineUpdate queue errors become critical when timeline records contain output variables.
        // Stops all background dequeue tasks, then performs one final drain pass
        // over each queue.
        public async Task ShutdownAsync()
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return;
            }

            if (!_queueInProcess)
            {
                // NOTE(review): no early return here — if Start() was never called,
                // the WhenAll below would dereference a null _allDequeueTasks.
                // Confirm callers always Start() before ShutdownAsync().
                Trace.Info("No-op, all queue process tasks have been stopped.");
            }

            Trace.Info("Fire signal to shutdown all queues.");
            _jobCompletionSource.TrySetResult(0);

            await Task.WhenAll(_allDequeueTasks);
            _queueInProcess = false;
            Trace.Info("All queue process task stopped.");

            // Drain the queue
            // ProcessWebConsoleLinesQueueAsync() will never throw exception, live console update is always best effort.
            Trace.Verbose("Draining web console line queue.");
            await ProcessWebConsoleLinesQueueAsync(runOnce: true);
            Trace.Info("Web console line queue drained.");

            // ProcessFilesUploadQueueAsync() will never throw exception, log file upload is always best effort.
            Trace.Verbose("Draining file upload queue.");
            await ProcessFilesUploadQueueAsync(runOnce: true);
            Trace.Info("File upload queue drained.");

            // ProcessTimelinesUpdateQueueAsync() will throw exception during shutdown
            // if any timeline record that failed to update contains output variables.
            Trace.Verbose("Draining timeline update queue.");
            await ProcessTimelinesUpdateQueueAsync(runOnce: true);
            Trace.Info("Timeline update queue drained.");

            Trace.Info("All queue process tasks have been stopped, and all queues are drained.");
        }
+
+ public void QueueWebConsoleLine(Guid stepRecordId, string line)
+ {
+ Trace.Verbose("Enqueue web console line queue: {0}", line);
+ if (HostContext.RunMode == RunMode.Local)
+ {
+ if ((line ?? string.Empty).StartsWith("##[section]"))
+ {
+ Console.WriteLine("******************************************************************************");
+ Console.WriteLine(line.Substring("##[section]".Length));
+ Console.WriteLine("******************************************************************************");
+ }
+ else
+ {
+ Console.WriteLine(line);
+ }
+
+ return;
+ }
+
+ _webConsoleLineQueue.Enqueue(new ConsoleLineInfo(stepRecordId, line));
+ }
+
+ public void QueueFileUpload(Guid timelineId, Guid timelineRecordId, string type, string name, string path, bool deleteSource)
+ {
+ if (HostContext.RunMode == RunMode.Local)
+ {
+ return;
+ }
+
+ ArgUtil.NotEmpty(timelineId, nameof(timelineId));
+ ArgUtil.NotEmpty(timelineRecordId, nameof(timelineRecordId));
+
+ // all parameter not null, file path exist.
+ var newFile = new UploadFileInfo()
+ {
+ TimelineId = timelineId,
+ TimelineRecordId = timelineRecordId,
+ Type = type,
+ Name = name,
+ Path = path,
+ DeleteSource = deleteSource
+ };
+
+ Trace.Verbose("Enqueue file upload queue: file '{0}' attach to record {1}", newFile.Path, timelineRecordId);
+ _fileUploadQueue.Enqueue(newFile);
+ }
+
        // Clones and enqueues a timeline record update, creating the per-timeline
        // queue on first use. No-op in local run mode.
        public void QueueTimelineRecordUpdate(Guid timelineId, TimelineRecord timelineRecord)
        {
            if (HostContext.RunMode == RunMode.Local)
            {
                return;
            }

            ArgUtil.NotEmpty(timelineId, nameof(timelineId));
            ArgUtil.NotNull(timelineRecord, nameof(timelineRecord));
            ArgUtil.NotEmpty(timelineRecord.Id, nameof(timelineRecord.Id));

            // NOTE(review): generic argument appears lost in extraction
            // (likely new ConcurrentQueue<TimelineRecord>()) — confirm.
            _timelineUpdateQueue.TryAdd(timelineId, new ConcurrentQueue());

            Trace.Verbose("Enqueue timeline {0} update queue: {1}", timelineId, timelineRecord.Id);
            // Clone so later caller-side mutations don't affect the queued snapshot.
            _timelineUpdateQueue[timelineId].Enqueue(timelineRecord.Clone());
        }
+
+ public void ReportThrottling(TimeSpan delay, DateTime expiration)
+ {
+ Trace.Info($"Receive server throttling report, expect delay {delay} milliseconds till {expiration}");
+ var throttlingEvent = JobServerQueueThrottling;
+ if (throttlingEvent != null)
+ {
+ throttlingEvent(this, new ThrottlingEventArgs(delay, expiration));
+ }
+ }
+
        // Background dequeue loop for web console lines: drains the queue,
        // groups lines by step timeline record, posts them in batches of 100,
        // and sleeps between passes (250ms for the first ~60s, then 500ms).
        // Best effort — failed batches are logged and dropped, never requeued.
        // runOnce=true performs a single drain pass (used during shutdown).
        private async Task ProcessWebConsoleLinesQueueAsync(bool runOnce = false)
        {
            while (!_jobCompletionSource.Task.IsCompleted || runOnce)
            {
                if (_webConsoleLineAggressiveDequeue && ++_webConsoleLineAggressiveDequeueCount > _webConsoleLineAggressiveDequeueLimit)
                {
                    Trace.Info("Stop aggressive process web console line queue.");
                    _webConsoleLineAggressiveDequeue = false;
                }

                // Group console lines by the timeline record of each step.
                // NOTE(review): generic type arguments on the collections below
                // appear to have been lost in extraction (likely
                // Dictionary<Guid, List<string>> and List<Guid>) — confirm.
                Dictionary> stepsConsoleLines = new Dictionary>();
                List stepRecordIds = new List(); // We need to keep lines in order
                int linesCounter = 0;
                ConsoleLineInfo lineInfo;
                while (_webConsoleLineQueue.TryDequeue(out lineInfo))
                {
                    if (!stepsConsoleLines.ContainsKey(lineInfo.StepRecordId))
                    {
                        stepsConsoleLines[lineInfo.StepRecordId] = new List();
                        stepRecordIds.Add(lineInfo.StepRecordId);
                    }

                    if (!string.IsNullOrEmpty(lineInfo.Line) && lineInfo.Line.Length > 1024)
                    {
                        Trace.Verbose("Web console line is more than 1024 chars, truncate to first 1024 chars");
                        lineInfo.Line = $"{lineInfo.Line.Substring(0, 1024)}...";
                    }

                    stepsConsoleLines[lineInfo.StepRecordId].Add(lineInfo.Line);
                    linesCounter++;

                    // process at most about 500 lines of web console line during regular timer dequeue task.
                    if (!runOnce && linesCounter > 500)
                    {
                        break;
                    }
                }

                // Batch post console lines for each step timeline record.
                foreach (var stepRecordId in stepRecordIds)
                {
                    // Split console lines into batches; each batch will contain at most 100 lines.
                    int batchCounter = 0;
                    List> batchedLines = new List>();
                    foreach (var line in stepsConsoleLines[stepRecordId])
                    {
                        var currentBatch = batchedLines.ElementAtOrDefault(batchCounter);
                        if (currentBatch == null)
                        {
                            batchedLines.Add(new List());
                            currentBatch = batchedLines.ElementAt(batchCounter);
                        }

                        currentBatch.Add(line);

                        if (currentBatch.Count >= 100)
                        {
                            batchCounter++;
                        }
                    }

                    if (batchedLines.Count > 0)
                    {
                        // When the job finishes, web console lines become less interesting to the customer.
                        // We batch and produce 500 lines of web console output every 500ms.
                        // If the customer's task produces massive output, the last queue drain run might take forever.
                        // So we will only upload the last 200 lines of each step from all buffered web console lines.
                        if (runOnce && batchedLines.Count > 2)
                        {
                            Trace.Info($"Skip {batchedLines.Count - 2} batches web console lines for last run");
                            batchedLines = batchedLines.TakeLast(2).ToList();
                            batchedLines[0].Insert(0, "...");
                        }

                        int errorCount = 0;
                        foreach (var batch in batchedLines)
                        {
                            try
                            {
                                // we will not requeue a failed batch, since the web console lines are time sensitive.
                                await _jobServer.AppendTimelineRecordFeedAsync(_scopeIdentifier, _hubName, _planId, _jobTimelineId, _jobTimelineRecordId, stepRecordId, batch, default(CancellationToken));
                                if (_firstConsoleOutputs)
                                {
                                    // Perf marker: time-to-first-console-output for this plan.
                                    HostContext.WritePerfCounter($"WorkerJobServerQueueAppendFirstConsoleOutput_{_planId.ToString()}");
                                    _firstConsoleOutputs = false;
                                }
                            }
                            catch (Exception ex)
                            {
                                Trace.Info("Catch exception during append web console line, keep going since the process is best effort.");
                                Trace.Error(ex);
                                errorCount++;
                            }
                        }

                        Trace.Info("Try to append {0} batches web console lines for record '{2}', success rate: {1}/{0}.", batchedLines.Count, batchedLines.Count - errorCount, stepRecordId);
                    }
                }

                if (runOnce)
                {
                    break;
                }
                else
                {
                    await Task.Delay(_webConsoleLineAggressiveDequeue ? _aggressiveDelayForWebConsoleLineDequeue : _delayForWebConsoleLineDequeue);
                }
            }
        }
+
+ // Background pump that drains the log/attachment file-upload queue.
+ // Dequeues buffered UploadFileInfo entries in small batches and uploads each one
+ // best-effort: failures are logged and counted, but the file is NOT re-queued (see TODO).
+ // runOnce=true performs a single final drain of the whole queue (used at job completion),
+ // then returns without delaying.
+ private async Task ProcessFilesUploadQueueAsync(bool runOnce = false)
+ {
+ while (!_jobCompletionSource.Task.IsCompleted || runOnce)
+ {
+ List filesToUpload = new List();
+ UploadFileInfo dequeueFile;
+ while (_fileUploadQueue.TryDequeue(out dequeueFile))
+ {
+ filesToUpload.Add(dequeueFile);
+ // process at most 10 file upload.
+ // NOTE(review): the '>' comparison actually allows an 11th item into the batch —
+ // confirm whether '>= 10' was intended.
+ if (!runOnce && filesToUpload.Count > 10)
+ {
+ break;
+ }
+ }
+
+ if (filesToUpload.Count > 0)
+ {
+ if (runOnce)
+ {
+ Trace.Info($"Uploading {filesToUpload.Count} files in one shot.");
+ }
+
+ // TODO: upload all file in parallel
+ int errorCount = 0;
+ foreach (var file in filesToUpload)
+ {
+ try
+ {
+ await UploadFile(file);
+ }
+ catch (Exception ex)
+ {
+ // Deliberate best-effort: swallow, log, and keep uploading the rest of the batch.
+ Trace.Info("Catch exception during log or attachment file upload, keep going since the process is best effort.");
+ Trace.Error(ex);
+ errorCount++;
+
+ // put the failed upload file back to queue.
+ // TODO: figure out how should we retry paging log upload.
+ //lock (_fileUploadQueueLock)
+ //{
+ // _fileUploadQueue.Enqueue(file);
+ //}
+ }
+ }
+
+ Trace.Info("Try to upload {0} log files or attachments, success rate: {1}/{0}.", filesToUpload.Count, filesToUpload.Count - errorCount);
+ }
+
+ if (runOnce)
+ {
+ break;
+ }
+ else
+ {
+ // Steady-state: wait before the next dequeue pass.
+ await Task.Delay(_delayForFileUploadDequeue);
+ }
+ }
+ }
+
+ // Background pump that flushes queued timeline-record updates to the server.
+ // Per pass: drain each known timeline's queue (in small batches), prepend any records
+ // buffered from a previously failed attempt, merge duplicates (MergeTimelineRecords),
+ // create any sub-timelines referenced via record.Details, then push the batch.
+ // Failed batches are stashed in _bufferedRetryRecords for the next pass.
+ // runOnce=true is the final drain at job completion; it loops again if a new
+ // sub-timeline was just created, and throws if output variables could not be published.
+ private async Task ProcessTimelinesUpdateQueueAsync(bool runOnce = false)
+ {
+ while (!_jobCompletionSource.Task.IsCompleted || runOnce)
+ {
+ List pendingUpdates = new List();
+ foreach (var timeline in _allTimelines)
+ {
+ ConcurrentQueue recordQueue;
+ if (_timelineUpdateQueue.TryGetValue(timeline, out recordQueue))
+ {
+ List records = new List();
+ TimelineRecord record;
+ while (recordQueue.TryDequeue(out record))
+ {
+ records.Add(record);
+ // process at most 25 timeline records update for each timeline.
+ // NOTE(review): '>' allows a 26th record into the batch — confirm whether '>= 25' was intended.
+ if (!runOnce && records.Count > 25)
+ {
+ break;
+ }
+ }
+
+ if (records.Count > 0)
+ {
+ pendingUpdates.Add(new PendingTimelineRecord() { TimelineId = timeline, PendingRecords = records.ToList() });
+ }
+ }
+ }
+
+ // we need track whether we have new sub-timeline been created on the last run.
+ // if so, we need continue update timeline record even we on the last run.
+ bool pendingSubtimelineUpdate = false;
+ List mainTimelineRecordsUpdateErrors = new List();
+ if (pendingUpdates.Count > 0)
+ {
+ foreach (var update in pendingUpdates)
+ {
+ // Prepend records left over from an earlier failed update so ordering is preserved.
+ List bufferedRecords;
+ if (_bufferedRetryRecords.TryGetValue(update.TimelineId, out bufferedRecords))
+ {
+ update.PendingRecords.InsertRange(0, bufferedRecords);
+ }
+
+ update.PendingRecords = MergeTimelineRecords(update.PendingRecords);
+
+ // Ensure every referenced sub-timeline exists on the server before pushing records.
+ foreach (var detailTimeline in update.PendingRecords.Where(r => r.Details != null))
+ {
+ if (!_allTimelines.Contains(detailTimeline.Details.Id))
+ {
+ try
+ {
+ Timeline newTimeline = await _jobServer.CreateTimelineAsync(_scopeIdentifier, _hubName, _planId, detailTimeline.Details.Id, default(CancellationToken));
+ _allTimelines.Add(newTimeline.Id);
+ pendingSubtimelineUpdate = true;
+ }
+ catch (TimelineExistsException)
+ {
+ // Benign race: someone else created it first; just track the id.
+ Trace.Info("Catch TimelineExistsException during timeline creation. Ignore the error since server already had this timeline.");
+ _allTimelines.Add(detailTimeline.Details.Id);
+ }
+ catch (Exception ex)
+ {
+ // Best-effort: creation failure is logged; the records will be retried later.
+ Trace.Error(ex);
+ }
+ }
+ }
+
+ try
+ {
+ await _jobServer.UpdateTimelineRecordsAsync(_scopeIdentifier, _hubName, _planId, update.TimelineId, update.PendingRecords, default(CancellationToken));
+ if (_bufferedRetryRecords.Remove(update.TimelineId))
+ {
+ Trace.Verbose("Cleanup buffered timeline record for timeline: {0}.", update.TimelineId);
+ }
+ }
+ catch (Exception ex)
+ {
+ // Stash the whole merged batch for retry on the next pass; remember failures on
+ // the job's main timeline so the final drain can decide to fail the job.
+ Trace.Info("Catch exception during update timeline records, try to update these timeline records next time.");
+ Trace.Error(ex);
+ _bufferedRetryRecords[update.TimelineId] = update.PendingRecords.ToList();
+ if (update.TimelineId == _jobTimelineId)
+ {
+ mainTimelineRecordsUpdateErrors.Add(ex);
+ }
+ }
+ }
+ }
+
+ if (runOnce)
+ {
+ // continue process timeline records update,
+ // we might have more records need update,
+ // since we just create a new sub-timeline
+ if (pendingSubtimelineUpdate)
+ {
+ continue;
+ }
+ else
+ {
+ // On the final drain, losing records that carry output variables is fatal:
+ // downstream jobs depend on them, so fail the job loudly instead of silently.
+ if (mainTimelineRecordsUpdateErrors.Count > 0 &&
+ _bufferedRetryRecords.ContainsKey(_jobTimelineId) &&
+ _bufferedRetryRecords[_jobTimelineId] != null &&
+ _bufferedRetryRecords[_jobTimelineId].Any(r => r.Variables.Count > 0))
+ {
+ Trace.Info("Fail to update timeline records with output variables. Throw exception to fail the job since output variables are critical to downstream jobs.");
+ throw new AggregateException("Failed to publish output variables.", mainTimelineRecordsUpdateErrors);
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+ else
+ {
+ await Task.Delay(_delayForTimelineUpdateDequeue);
+ }
+ }
+ }
+
+ // Collapses a list of (possibly partial) timeline record updates so that at most one
+ // record per Id is sent to the server. Later updates win field-by-field: for most fields
+ // a non-null value in the later record overwrites the earlier one; Issues are replaced
+ // wholesale when the later record has any; Variables are merged key-by-key.
+ // Returns the input unchanged when there is nothing to merge (null, 0 or 1 record).
+ private List MergeTimelineRecords(List timelineRecords)
+ {
+ if (timelineRecords == null || timelineRecords.Count <= 1)
+ {
+ return timelineRecords;
+ }
+
+ Dictionary dict = new Dictionary();
+ foreach (TimelineRecord rec in timelineRecords)
+ {
+ if (rec == null)
+ {
+ continue;
+ }
+
+ TimelineRecord timelineRecord;
+ if (dict.TryGetValue(rec.Id, out timelineRecord))
+ {
+ // Merge rec into timelineRecord
+ timelineRecord.CurrentOperation = rec.CurrentOperation ?? timelineRecord.CurrentOperation;
+ timelineRecord.Details = rec.Details ?? timelineRecord.Details;
+ timelineRecord.FinishTime = rec.FinishTime ?? timelineRecord.FinishTime;
+ timelineRecord.Log = rec.Log ?? timelineRecord.Log;
+ timelineRecord.Name = rec.Name ?? timelineRecord.Name;
+ timelineRecord.RefName = rec.RefName ?? timelineRecord.RefName;
+ timelineRecord.PercentComplete = rec.PercentComplete ?? timelineRecord.PercentComplete;
+ timelineRecord.RecordType = rec.RecordType ?? timelineRecord.RecordType;
+ timelineRecord.Result = rec.Result ?? timelineRecord.Result;
+ timelineRecord.ResultCode = rec.ResultCode ?? timelineRecord.ResultCode;
+ timelineRecord.StartTime = rec.StartTime ?? timelineRecord.StartTime;
+ timelineRecord.State = rec.State ?? timelineRecord.State;
+ timelineRecord.WorkerName = rec.WorkerName ?? timelineRecord.WorkerName;
+
+ // Counts only move forward: a later zero does not reset an earlier non-zero count.
+ if (rec.ErrorCount != null && rec.ErrorCount > 0)
+ {
+ timelineRecord.ErrorCount = rec.ErrorCount;
+ }
+
+ if (rec.WarningCount != null && rec.WarningCount > 0)
+ {
+ timelineRecord.WarningCount = rec.WarningCount;
+ }
+
+ // Issues from the later record fully replace the earlier set (cloned, not shared).
+ if (rec.Issues.Count > 0)
+ {
+ timelineRecord.Issues.Clear();
+ timelineRecord.Issues.AddRange(rec.Issues.Select(i => i.Clone()));
+ }
+
+ // Variables merge per key; later values overwrite earlier ones for the same key.
+ if (rec.Variables.Count > 0)
+ {
+ foreach (var variable in rec.Variables)
+ {
+ timelineRecord.Variables[variable.Key] = variable.Value.Clone();
+ }
+ }
+ }
+ else
+ {
+ // First occurrence of this record id becomes the merge target.
+ dict.Add(rec.Id, rec);
+ }
+ }
+
+ var mergedRecords = dict.Values.ToList();
+
+ // Diagnostic-only dump of the merged result.
+ Trace.Verbose("Merged Timeline records");
+ foreach (var record in mergedRecords)
+ {
+ Trace.Verbose($" Record: t={record.RecordType}, n={record.Name}, s={record.State}, st={record.StartTime}, {record.PercentComplete}%, ft={record.FinishTime}, r={record.Result}: {record.CurrentOperation}");
+ if (record.Issues != null && record.Issues.Count > 0)
+ {
+ foreach (var issue in record.Issues)
+ {
+ String source;
+ issue.Data.TryGetValue("sourcepath", out source);
+ Trace.Verbose($" Issue: c={issue.Category}, t={issue.Type}, s={source ?? string.Empty}, m={issue.Message}");
+ }
+ }
+
+ if (record.Variables != null && record.Variables.Count > 0)
+ {
+ foreach (var variable in record.Variables)
+ {
+ Trace.Verbose($" Variable: n={variable.Key}, secret={variable.Value.IsSecret}");
+ }
+ }
+ }
+
+ return mergedRecords;
+ }
+
+ // Uploads a single buffered file to the server.
+ // For file.Type == CoreAttachmentType.Log: creates a server-side log, streams the file
+ // into it, then queues a timeline-record update that attaches the new log to the record.
+ // For any other type: uploads the file as a generic attachment on the timeline record.
+ // When the upload succeeds and file.DeleteSource is set, the local file is deleted
+ // best-effort (delete failures are logged and ignored). Exceptions from the upload
+ // itself propagate to the caller (which counts them as errors).
+ private async Task UploadFile(UploadFileInfo file)
+ {
+ bool uploadSucceed = false;
+ try
+ {
+ if (String.Equals(file.Type, CoreAttachmentType.Log, StringComparison.OrdinalIgnoreCase))
+ {
+ // Create the log
+ var taskLog = await _jobServer.CreateLogAsync(_scopeIdentifier, _hubName, _planId, new TaskLog(String.Format(@"logs\{0:D}", file.TimelineRecordId)), default(CancellationToken));
+
+ // Upload the contents
+ // FileShare.ReadWrite: the producer may still hold the file open.
+ using (FileStream fs = File.Open(file.Path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
+ {
+ var logUploaded = await _jobServer.AppendLogContentAsync(_scopeIdentifier, _hubName, _planId, taskLog.Id, fs, default(CancellationToken));
+ }
+
+ // Create a new record and only set the Log field
+ var attachmentUpdataRecord = new TimelineRecord() { Id = file.TimelineRecordId, Log = taskLog };
+ QueueTimelineRecordUpdate(file.TimelineId, attachmentUpdataRecord);
+ }
+ else
+ {
+ // Create attachment
+ using (FileStream fs = File.Open(file.Path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
+ {
+ var result = await _jobServer.CreateAttachmentAsync(_scopeIdentifier, _hubName, _planId, file.TimelineId, file.TimelineRecordId, file.Type, file.Name, fs, default(CancellationToken));
+ }
+ }
+
+ uploadSucceed = true;
+ }
+ finally
+ {
+ // Cleanup runs even when the upload throws, but only deletes on success.
+ if (uploadSucceed && file.DeleteSource)
+ {
+ try
+ {
+ File.Delete(file.Path);
+ }
+ catch (Exception ex)
+ {
+ Trace.Info("Catch exception during delete success uploaded file.");
+ Trace.Error(ex);
+ }
+ }
+ }
+ }
+ }
+
+ // A batch of dequeued timeline-record updates destined for one timeline,
+ // assembled per pass by ProcessTimelinesUpdateQueueAsync.
+ internal class PendingTimelineRecord
+ {
+ public Guid TimelineId { get; set; }
+ public List PendingRecords { get; set; }
+ }
+
+ // Describes one local file queued for upload as a log or attachment (see UploadFile).
+ internal class UploadFileInfo
+ {
+ public Guid TimelineId { get; set; }
+ public Guid TimelineRecordId { get; set; }
+ // Attachment type, e.g. CoreAttachmentType.Log ("DistributedTask.Core.Log").
+ public string Type { get; set; }
+ public string Name { get; set; }
+ // Full path of the local file to upload.
+ public string Path { get; set; }
+ // When true, the local file is deleted after a successful upload.
+ public bool DeleteSource { get; set; }
+ }
+
+
+ // One buffered web-console output line, tagged with the step (timeline record) it belongs to.
+ internal class ConsoleLineInfo
+ {
+ public ConsoleLineInfo(Guid recordId, string line)
+ {
+ this.StepRecordId = recordId;
+ this.Line = line;
+ }
+
+ public Guid StepRecordId { get; set; }
+ public string Line { get; set; }
+ }
+}
diff --git a/src/Runner.Common/LocationServer.cs b/src/Runner.Common/LocationServer.cs
new file mode 100644
index 00000000000..25e09228332
--- /dev/null
+++ b/src/Runner.Common/LocationServer.cs
@@ -0,0 +1,61 @@
+using System;
+using System.Threading.Tasks;
+using GitHub.Services.WebApi;
+using GitHub.Services.Location.Client;
+using GitHub.Services.Location;
+
+namespace GitHub.Runner.Common
+{
+ // Thin service wrapper over the location HTTP client: connect once, then query
+ // server connection data. Resolved via the runner's service locator.
+ [ServiceLocator(Default = typeof(LocationServer))]
+ public interface ILocationServer : IRunnerService
+ {
+ // Establishes (and authenticates) the connection used by subsequent calls.
+ Task ConnectAsync(VssConnection jobConnection);
+
+ // Fetches connection data from the server; requires a prior successful ConnectAsync.
+ Task GetConnectionDataAsync();
+ }
+
+ // Default ILocationServer implementation: retries authentication a few times,
+ // then exposes the LocationHttpClient for connection-data queries.
+ public sealed class LocationServer : RunnerService, ILocationServer
+ {
+ private bool _hasConnection;
+ private VssConnection _connection;
+ private LocationHttpClient _locationClient;
+
+ // Authenticates the given connection with up to 5 attempts, 100ms apart.
+ // The exception filter lets the final attempt's failure propagate to the caller.
+ public async Task ConnectAsync(VssConnection jobConnection)
+ {
+ _connection = jobConnection;
+ int attemptCount = 5;
+ while (!_connection.HasAuthenticated && attemptCount-- > 0)
+ {
+ try
+ {
+ await _connection.ConnectAsync();
+ break;
+ }
+ catch (Exception ex) when (attemptCount > 0)
+ {
+ // Swallow and retry while attempts remain; the last failure is rethrown.
+ Trace.Info($"Catch exception during connect. {attemptCount} attempt left.");
+ Trace.Error(ex);
+ }
+
+ await Task.Delay(100);
+ }
+
+ _locationClient = _connection.GetClient();
+ _hasConnection = true;
+ }
+
+ // Guard used by API methods: callers must ConnectAsync first.
+ private void CheckConnection()
+ {
+ if (!_hasConnection)
+ {
+ throw new InvalidOperationException("SetConnection");
+ }
+ }
+
+ public async Task GetConnectionDataAsync()
+ {
+ CheckConnection();
+ return await _locationClient.GetConnectionDataAsync(ConnectOptions.None, 0);
+ }
+ }
+}
diff --git a/src/Runner.Common/Logging.cs b/src/Runner.Common/Logging.cs
new file mode 100644
index 00000000000..26a25d64482
--- /dev/null
+++ b/src/Runner.Common/Logging.cs
@@ -0,0 +1,124 @@
+using GitHub.Runner.Common.Util;
+using System;
+using System.IO;
+
+namespace GitHub.Runner.Common
+{
+ // Writer that pages step log output to disk and hands completed pages to the
+ // job server queue for upload. Resolved via the runner's service locator.
+ [ServiceLocator(Default = typeof(PagingLogger))]
+ public interface IPagingLogger : IRunnerService
+ {
+ // Total number of lines written so far (embedded newlines in a message count).
+ long TotalLines { get; }
+ // Binds the logger to the timeline record whose log is being written.
+ void Setup(Guid timelineId, Guid timelineRecordId);
+
+ // Appends one timestamped message to the current page.
+ void Write(string message);
+
+ // Flushes and queues the final page for upload.
+ void End();
+ }
+
+ // Writes log lines to rolling on-disk pages (8 MB each) under the diag "pages" folder.
+ // When a page fills up (or End() is called) the page file is queued on IJobServerQueue
+ // for upload as a "DistributedTask.Core.Log" attachment and a new page is started lazily
+ // on the next Write.
+ public class PagingLogger : RunnerService, IPagingLogger
+ {
+ public static string PagingFolder = "pages";
+
+ // 8 MB
+ public const int PageSize = 8 * 1024 * 1024;
+
+ private Guid _timelineId;
+ private Guid _timelineRecordId;
+ // Unique prefix for this logger's page files: {_pageId}_{n}.log
+ private string _pageId;
+ private FileStream _pageData;
+ private StreamWriter _pageWriter;
+ // Approximate bytes written to the current page (see NOTE in Write).
+ private int _byteCount;
+ private int _pageCount;
+ private long _totalLines;
+ private string _dataFileName;
+ private string _pagesFolder;
+ private IJobServerQueue _jobServerQueue;
+
+ public long TotalLines => _totalLines;
+
+ public override void Initialize(IHostContext hostContext)
+ {
+ base.Initialize(hostContext);
+ _totalLines = 0;
+ _pageId = Guid.NewGuid().ToString();
+ _pagesFolder = Path.Combine(hostContext.GetDirectory(WellKnownDirectory.Diag), PagingFolder);
+ _jobServerQueue = HostContext.GetService();
+ Directory.CreateDirectory(_pagesFolder);
+ }
+
+ public void Setup(Guid timelineId, Guid timelineRecordId)
+ {
+ _timelineId = timelineId;
+ _timelineRecordId = timelineRecordId;
+ }
+
+ //
+ // Write a metadata file with id etc, point to pages on disk.
+ // Each page is a guid_#. As a page rolls over, it signals it's done
+ // and the consumer queues it for upload.
+ // Ensure this is lazy. Create a page on first write.
+ //
+ public void Write(string message)
+ {
+ // lazy creation on write
+ if (_pageWriter == null)
+ {
+ Create();
+ }
+
+ // Prefix every message with a round-trippable UTC timestamp.
+ string line = $"{DateTime.UtcNow.ToString("O")} {message}";
+ _pageWriter.WriteLine(line);
+
+ // Count the line itself plus any embedded newlines in a multi-line message.
+ _totalLines++;
+ if (line.IndexOf('\n') != -1)
+ {
+ foreach (char c in line)
+ {
+ if (c == '\n')
+ {
+ _totalLines++;
+ }
+ }
+ }
+
+ // NOTE(review): the WriteLine newline bytes are not included in this count,
+ // so pages can slightly exceed PageSize — presumably acceptable; confirm.
+ _byteCount += System.Text.Encoding.UTF8.GetByteCount(line);
+ if (_byteCount >= PageSize)
+ {
+ NewPage();
+ }
+ }
+
+ public void End()
+ {
+ EndPage();
+ }
+
+ private void Create()
+ {
+ NewPage();
+ }
+
+ // Closes the current page (if any) and opens the next numbered page file.
+ private void NewPage()
+ {
+ EndPage();
+ _byteCount = 0;
+ _dataFileName = Path.Combine(_pagesFolder, $"{_pageId}_{++_pageCount}.log");
+ _pageData = new FileStream(_dataFileName, FileMode.CreateNew);
+ _pageWriter = new StreamWriter(_pageData, System.Text.Encoding.UTF8);
+ }
+
+ // Flushes and disposes the current page, then queues it for upload with
+ // deleteSource=true so the local page file is removed after a successful upload.
+ private void EndPage()
+ {
+ if (_pageWriter != null)
+ {
+ _pageWriter.Flush();
+ _pageData.Flush();
+ //The StreamWriter object calls Dispose() on the provided Stream object when StreamWriter.Dispose is called.
+ _pageWriter.Dispose();
+ _pageWriter = null;
+ _pageData = null;
+ _jobServerQueue.QueueFileUpload(_timelineId, _timelineRecordId, "DistributedTask.Core.Log", "CustomToolLog", _dataFileName, true);
+ }
+ }
+ }
+}
diff --git a/src/Runner.Common/ProcessChannel.cs b/src/Runner.Common/ProcessChannel.cs
new file mode 100644
index 00000000000..14d367e1edf
--- /dev/null
+++ b/src/Runner.Common/ProcessChannel.cs
@@ -0,0 +1,100 @@
+using System;
+using System.IO;
+using System.IO.Pipes;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace GitHub.Runner.Common
+{
+ // Callback that launches the worker process, receiving the two anonymous-pipe
+ // client handles (server's out pipe, server's in pipe) as strings to pass on
+ // the child's command line.
+ public delegate void StartProcessDelegate(string pipeHandleOut, string pipeHandleIn);
+
+ // Message types exchanged between the runner (listener) and worker processes.
+ public enum MessageType
+ {
+ NotInitialized = -1,
+ NewJobRequest = 1,
+ CancelRequest = 2,
+ RunnerShutdown = 3,
+ OperatingSystemShutdown = 4
+ }
+
+ // A typed message plus its string payload, as sent over the process channel.
+ public struct WorkerMessage
+ {
+ public MessageType MessageType;
+ public string Body;
+ public WorkerMessage(MessageType messageType, string body)
+ {
+ MessageType = messageType;
+ Body = body;
+ }
+ }
+
+ // Bidirectional IPC channel between runner and worker over a pair of anonymous pipes.
+ // The parent calls StartServer (which spawns the child via the delegate); the child
+ // calls StartClient with the handle strings it received on its command line.
+ [ServiceLocator(Default = typeof(ProcessChannel))]
+ public interface IProcessChannel : IDisposable, IRunnerService
+ {
+ void StartServer(StartProcessDelegate startProcess);
+ void StartClient(string pipeNameInput, string pipeNameOutput);
+
+ Task SendAsync(MessageType messageType, string body, CancellationToken cancellationToken);
+ Task ReceiveAsync(CancellationToken cancellationToken);
+ }
+
+ // Anonymous-pipe implementation of IProcessChannel. Messages are framed as an
+ // Int32 message type followed by a length-prefixed string body (via StreamString).
+ public sealed class ProcessChannel : RunnerService, IProcessChannel
+ {
+ private AnonymousPipeServerStream _inServer;
+ private AnonymousPipeServerStream _outServer;
+ private AnonymousPipeClientStream _inClient;
+ private AnonymousPipeClientStream _outClient;
+ private StreamString _writeStream;
+ private StreamString _readStream;
+
+ // Parent side: create both pipes with inheritable client handles, hand the handle
+ // strings to the child-spawning delegate, then release the local client-handle copies
+ // so pipe closure is detected correctly.
+ public void StartServer(StartProcessDelegate startProcess)
+ {
+ _outServer = new AnonymousPipeServerStream(PipeDirection.Out, HandleInheritability.Inheritable);
+ _inServer = new AnonymousPipeServerStream(PipeDirection.In, HandleInheritability.Inheritable);
+ _readStream = new StreamString(_inServer);
+ _writeStream = new StreamString(_outServer);
+ startProcess(_outServer.GetClientHandleAsString(), _inServer.GetClientHandleAsString());
+ _outServer.DisposeLocalCopyOfClientHandle();
+ _inServer.DisposeLocalCopyOfClientHandle();
+ }
+
+ // Child side: attach to the inherited pipe handles passed on the command line.
+ public void StartClient(string pipeNameInput, string pipeNameOutput)
+ {
+ _inClient = new AnonymousPipeClientStream(PipeDirection.In, pipeNameInput);
+ _outClient = new AnonymousPipeClientStream(PipeDirection.Out, pipeNameOutput);
+ _readStream = new StreamString(_inClient);
+ _writeStream = new StreamString(_outClient);
+ }
+
+ public async Task SendAsync(MessageType messageType, string body, CancellationToken cancellationToken)
+ {
+ await _writeStream.WriteInt32Async((int)messageType, cancellationToken);
+ await _writeStream.WriteStringAsync(body, cancellationToken);
+ }
+
+ public async Task ReceiveAsync(CancellationToken cancellationToken)
+ {
+ WorkerMessage result = new WorkerMessage(MessageType.NotInitialized, string.Empty);
+ result.MessageType = (MessageType)await _readStream.ReadInt32Async(cancellationToken);
+ result.Body = await _readStream.ReadStringAsync(cancellationToken);
+ return result;
+ }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ GC.SuppressFinalize(this);
+ }
+
+ private void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ // Only one side of each pair is non-null depending on server/client role.
+ _inServer?.Dispose();
+ _outServer?.Dispose();
+ _inClient?.Dispose();
+ _outClient?.Dispose();
+ }
+ }
+ }
+}
diff --git a/src/Runner.Common/ProcessExtensions.cs b/src/Runner.Common/ProcessExtensions.cs
new file mode 100644
index 00000000000..5e3bbd35ba1
--- /dev/null
+++ b/src/Runner.Common/ProcessExtensions.cs
@@ -0,0 +1,396 @@
+using GitHub.Runner.Common.Util;
+using GitHub.Runner.Sdk;
+using System;
+using System.Collections.Concurrent;
+using System.Collections.Generic;
+using System.ComponentModel;
+using System.Diagnostics;
+using System.IO;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace GitHub.Runner.Common
+{
+#if OS_WINDOWS
+ public static class WindowsProcessExtensions
+ {
+ // Reference: https://blogs.msdn.microsoft.com/matt_pietrek/2004/08/25/reading-another-processs-environment/
+ // Reference: http://blog.gapotchenko.com/eazfuscator.net/reading-environment-variables
+ public static string GetEnvironmentVariable(this Process process, IHostContext hostContext, string variable)
+ {
+ var trace = hostContext.GetTrace(nameof(WindowsProcessExtensions));
+ Dictionary environmentVariables = new Dictionary(StringComparer.OrdinalIgnoreCase);
+ IntPtr processHandle = process.SafeHandle.DangerousGetHandle();
+
+ IntPtr environmentBlockAddress;
+ if (Environment.Is64BitOperatingSystem)
+ {
+ PROCESS_BASIC_INFORMATION64 pbi = new PROCESS_BASIC_INFORMATION64();
+ int returnLength = 0;
+ int status = NtQueryInformationProcess64(processHandle, PROCESSINFOCLASS.ProcessBasicInformation, ref pbi, Marshal.SizeOf(pbi), ref returnLength);
+ if (status != 0)
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ bool wow64;
+ if (!IsWow64Process(processHandle, out wow64))
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ if (!wow64)
+ {
+ // 64 bits process on 64 bits OS
+ IntPtr UserProcessParameterAddress = ReadIntPtr64(processHandle, new IntPtr(pbi.PebBaseAddress) + 0x20);
+ environmentBlockAddress = ReadIntPtr64(processHandle, UserProcessParameterAddress + 0x80);
+ }
+ else
+ {
+ // 32 bits process on 64 bits OS
+ IntPtr UserProcessParameterAddress = ReadIntPtr32(processHandle, new IntPtr(pbi.PebBaseAddress) + 0x1010);
+ environmentBlockAddress = ReadIntPtr32(processHandle, UserProcessParameterAddress + 0x48);
+ }
+ }
+ else
+ {
+ PROCESS_BASIC_INFORMATION32 pbi = new PROCESS_BASIC_INFORMATION32();
+ int returnLength = 0;
+ int status = NtQueryInformationProcess32(processHandle, PROCESSINFOCLASS.ProcessBasicInformation, ref pbi, Marshal.SizeOf(pbi), ref returnLength);
+ if (status != 0)
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ // 32 bits process on 32 bits OS
+ IntPtr UserProcessParameterAddress = ReadIntPtr32(processHandle, new IntPtr(pbi.PebBaseAddress) + 0x10);
+ environmentBlockAddress = ReadIntPtr32(processHandle, UserProcessParameterAddress + 0x48);
+ }
+
+ MEMORY_BASIC_INFORMATION memInfo = new MEMORY_BASIC_INFORMATION();
+ if (VirtualQueryEx(processHandle, environmentBlockAddress, ref memInfo, Marshal.SizeOf(memInfo)) == 0)
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ Int64 dataSize = memInfo.RegionSize.ToInt64() - (environmentBlockAddress.ToInt64() - memInfo.BaseAddress.ToInt64());
+
+ byte[] envData = new byte[dataSize];
+ IntPtr res_len = IntPtr.Zero;
+ if (!ReadProcessMemory(processHandle, environmentBlockAddress, envData, new IntPtr(dataSize), ref res_len))
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ if (res_len.ToInt64() != dataSize)
+ {
+ throw new ArgumentOutOfRangeException(nameof(ReadProcessMemory));
+ }
+
+ string environmentVariableString;
+ Int64 environmentVariableBytesLength = 0;
+ // check env encoding
+ if (envData[0] != 0 && envData[1] == 0)
+ {
+ // Unicode
+ for (Int64 index = 0; index < dataSize; index++)
+ {
+ // Unicode encoded environment variables block ends up with '\0\0\0\0'.
+ if (environmentVariableBytesLength == 0 &&
+ envData[index] == 0 &&
+ index + 3 < dataSize &&
+ envData[index + 1] == 0 &&
+ envData[index + 2] == 0 &&
+ envData[index + 3] == 0)
+ {
+ environmentVariableBytesLength = index + 3;
+ }
+ else if (environmentVariableBytesLength != 0)
+ {
+ // set it '\0' so we can easily trim it, most array method doesn't take int64
+ envData[index] = 0;
+ }
+ }
+
+ if (environmentVariableBytesLength == 0)
+ {
+ throw new ArgumentException(nameof(environmentVariableBytesLength));
+ }
+
+ environmentVariableString = Encoding.Unicode.GetString(envData);
+ }
+ else if (envData[0] != 0 && envData[1] != 0)
+ {
+ // ANSI
+ for (Int64 index = 0; index < dataSize; index++)
+ {
+ // Unicode encoded environment variables block ends up with '\0\0'.
+ if (environmentVariableBytesLength == 0 &&
+ envData[index] == 0 &&
+ index + 1 < dataSize &&
+ envData[index + 1] == 0)
+ {
+ environmentVariableBytesLength = index + 1;
+ }
+ else if (environmentVariableBytesLength != 0)
+ {
+ // set it '\0' so we can easily trim it, most array method doesn't take int64
+ envData[index] = 0;
+ }
+ }
+
+ if (environmentVariableBytesLength == 0)
+ {
+ throw new ArgumentException(nameof(environmentVariableBytesLength));
+ }
+
+ environmentVariableString = Encoding.Default.GetString(envData);
+ }
+ else
+ {
+ throw new ArgumentException(nameof(envData));
+ }
+
+ foreach (var envString in environmentVariableString.Split("\0", StringSplitOptions.RemoveEmptyEntries))
+ {
+ string[] env = envString.Split("=", 2);
+ if (!string.IsNullOrEmpty(env[0]))
+ {
+ environmentVariables[env[0]] = env[1];
+ trace.Verbose($"PID:{process.Id} ({env[0]}={env[1]})");
+ }
+ }
+
+ if (environmentVariables.TryGetValue(variable, out string envVariable))
+ {
+ return envVariable;
+ }
+ else
+ {
+ return null;
+ }
+ }
+
+ private static IntPtr ReadIntPtr32(IntPtr hProcess, IntPtr ptr)
+ {
+ IntPtr readPtr = IntPtr.Zero;
+ IntPtr data = Marshal.AllocHGlobal(sizeof(Int32));
+ try
+ {
+ IntPtr res_len = IntPtr.Zero;
+ if (!ReadProcessMemory(hProcess, ptr, data, new IntPtr(sizeof(Int32)), ref res_len))
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ if (res_len.ToInt32() != sizeof(Int32))
+ {
+ throw new ArgumentOutOfRangeException(nameof(ReadProcessMemory));
+ }
+
+ readPtr = new IntPtr(Marshal.ReadInt32(data));
+ }
+ finally
+ {
+ Marshal.FreeHGlobal(data);
+ }
+
+ return readPtr;
+ }
+
+ private static IntPtr ReadIntPtr64(IntPtr hProcess, IntPtr ptr)
+ {
+ IntPtr readPtr = IntPtr.Zero;
+ IntPtr data = Marshal.AllocHGlobal(IntPtr.Size);
+ try
+ {
+ IntPtr res_len = IntPtr.Zero;
+ if (!ReadProcessMemory(hProcess, ptr, data, new IntPtr(sizeof(Int64)), ref res_len))
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ if (res_len.ToInt32() != IntPtr.Size)
+ {
+ throw new ArgumentOutOfRangeException(nameof(ReadProcessMemory));
+ }
+
+ readPtr = Marshal.ReadIntPtr(data);
+ }
+ finally
+ {
+ Marshal.FreeHGlobal(data);
+ }
+
+ return readPtr;
+ }
+
+ private enum PROCESSINFOCLASS : int
+ {
+ ProcessBasicInformation = 0
+ };
+
+ [StructLayout(LayoutKind.Sequential)]
+ private struct MEMORY_BASIC_INFORMATION
+ {
+ public IntPtr BaseAddress;
+ public IntPtr AllocationBase;
+ public int AllocationProtect;
+ public IntPtr RegionSize;
+ public int State;
+ public int Protect;
+ public int Type;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ private struct PROCESS_BASIC_INFORMATION64
+ {
+ public long ExitStatus;
+ public long PebBaseAddress;
+ public long AffinityMask;
+ public long BasePriority;
+ public long UniqueProcessId;
+ public long InheritedFromUniqueProcessId;
+ };
+
+ [StructLayout(LayoutKind.Sequential)]
+ private struct PROCESS_BASIC_INFORMATION32
+ {
+ public int ExitStatus;
+ public int PebBaseAddress;
+ public int AffinityMask;
+ public int BasePriority;
+ public int UniqueProcessId;
+ public int InheritedFromUniqueProcessId;
+ };
+
+ [DllImport("ntdll.dll", SetLastError = true, EntryPoint = "NtQueryInformationProcess")]
+ private static extern int NtQueryInformationProcess64(IntPtr processHandle, PROCESSINFOCLASS processInformationClass, ref PROCESS_BASIC_INFORMATION64 processInformation, int processInformationLength, ref int returnLength);
+
+ [DllImport("ntdll.dll", SetLastError = true, EntryPoint = "NtQueryInformationProcess")]
+ private static extern int NtQueryInformationProcess32(IntPtr processHandle, PROCESSINFOCLASS processInformationClass, ref PROCESS_BASIC_INFORMATION32 processInformation, int processInformationLength, ref int returnLength);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool IsWow64Process(IntPtr processHandle, out bool wow64Process);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool ReadProcessMemory(IntPtr hProcess, IntPtr lpBaseAddress, IntPtr lpBuffer, IntPtr dwSize, ref IntPtr lpNumberOfBytesRead);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool ReadProcessMemory(IntPtr hProcess, IntPtr lpBaseAddress, [Out] byte[] lpBuffer, IntPtr dwSize, ref IntPtr lpNumberOfBytesRead);
+
+ [DllImport("kernel32.dll")]
+ private static extern int VirtualQueryEx(IntPtr processHandle, IntPtr baseAddress, ref MEMORY_BASIC_INFORMATION memoryInformation, int memoryInformationLength);
+ }
+#else
+ // Linux/macOS: reads another process's environment to look up one variable.
+ // On Linux it parses /proc/{pid}/environ; on macOS (no /proc) it scrapes the
+ // output of `ps e -p {pid} -o command`, which is best-effort only.
+ public static class LinuxProcessExtensions
+ {
+ // Returns the value of 'variable' in 'process', or null when not found.
+ public static string GetEnvironmentVariable(this Process process, IHostContext hostContext, string variable)
+ {
+ var trace = hostContext.GetTrace(nameof(LinuxProcessExtensions));
+ Dictionary env = new Dictionary();
+
+ if (Directory.Exists("/proc"))
+ {
+ string envFile = $"/proc/{process.Id}/environ";
+ trace.Info($"Read env from {envFile}");
+ string envContent = File.ReadAllText(envFile);
+ if (!string.IsNullOrEmpty(envContent))
+ {
+ // on linux, environment variables are separated by '\0'
+ var envList = envContent.Split('\0', StringSplitOptions.RemoveEmptyEntries);
+ foreach (var envStr in envList)
+ {
+ // split on the first '='
+ var keyValuePair = envStr.Split('=', 2);
+ if (keyValuePair.Length == 2)
+ {
+ env[keyValuePair[0]] = keyValuePair[1];
+ trace.Verbose($"PID:{process.Id} ({keyValuePair[0]}={keyValuePair[1]})");
+ }
+ }
+ }
+ }
+ else
+ {
+ // On OSX, there is no /proc folder for us to read environment for a given process,
+ // so we have to call `ps e -p <pid> -o command` to print the env to STDOUT.
+ // However, the output is not in a parseable format: it's just a string concatenating
+ // all envs with spaces and does not escape '=' or ' ', so we can't parse the output
+ // into a dictionary of all envs. We only look for the requested variable, in the
+ // format variable=value (it won't work if the value contains '=' or a space).
+ trace.Info($"Read env from output of `ps e -p {process.Id} -o command`");
+ List psOut = new List();
+ object outputLock = new object();
+ using (var p = hostContext.CreateService())
+ {
+ p.OutputDataReceived += delegate (object sender, ProcessDataReceivedEventArgs stdout)
+ {
+ if (!string.IsNullOrEmpty(stdout.Data))
+ {
+ lock (outputLock)
+ {
+ psOut.Add(stdout.Data);
+ }
+ }
+ };
+
+ p.ErrorDataReceived += delegate (object sender, ProcessDataReceivedEventArgs stderr)
+ {
+ if (!string.IsNullOrEmpty(stderr.Data))
+ {
+ lock (outputLock)
+ {
+ trace.Error(stderr.Data);
+ }
+ }
+ };
+
+ // Synchronous wait is acceptable here; this extension is a blocking lookup.
+ int exitCode = p.ExecuteAsync(workingDirectory: hostContext.GetDirectory(WellKnownDirectory.Root),
+ fileName: "ps",
+ arguments: $"e -p {process.Id} -o command",
+ environment: null,
+ cancellationToken: CancellationToken.None).GetAwaiter().GetResult();
+ if (exitCode == 0)
+ {
+ trace.Info($"Successfully dump environment variables for {process.Id}");
+ if (psOut.Count > 0)
+ {
+ string psOutputString = string.Join(" ", psOut);
+ trace.Verbose($"ps output: '{psOutputString}'");
+
+ // NOTE(review): IndexOf can match 'variable' as a substring of a longer
+ // name (e.g. MY_PATH when looking for PATH) — known best-effort limitation.
+ int varStartIndex = psOutputString.IndexOf(variable, StringComparison.Ordinal);
+ if (varStartIndex >= 0)
+ {
+ // Take everything after "variable=" up to the next space (or end of string).
+ string rightPart = psOutputString.Substring(varStartIndex + variable.Length + 1);
+ if (rightPart.IndexOf(' ') > 0)
+ {
+ string value = rightPart.Substring(0, rightPart.IndexOf(' '));
+ env[variable] = value;
+ }
+ else
+ {
+ env[variable] = rightPart;
+ }
+
+ trace.Verbose($"PID:{process.Id} ({variable}={env[variable]})");
+ }
+ }
+ }
+ }
+ }
+
+ if (env.TryGetValue(variable, out string envVariable))
+ {
+ return envVariable;
+ }
+ else
+ {
+ return null;
+ }
+ }
+ }
+#endif
+}
diff --git a/src/Runner.Common/ProcessInvoker.cs b/src/Runner.Common/ProcessInvoker.cs
new file mode 100644
index 00000000000..021c6db5e02
--- /dev/null
+++ b/src/Runner.Common/ProcessInvoker.cs
@@ -0,0 +1,329 @@
+using GitHub.Runner.Common.Util;
+using GitHub.Runner.Sdk;
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.Threading;
+using System.Threading.Channels;
+using System.Threading.Tasks;
+
+namespace GitHub.Runner.Common
+{
+    [ServiceLocator(Default = typeof(ProcessInvokerWrapper))]
+    public interface IProcessInvoker : IDisposable, IRunnerService
+    {
+        // Raised once per line the child process writes to STDOUT.
+        event EventHandler<ProcessDataReceivedEventArgs> OutputDataReceived;
+
+        // Raised once per line the child process writes to STDERR.
+        event EventHandler<ProcessDataReceivedEventArgs> ErrorDataReceived;
+
+        // Runs a child process and returns its exit code. Each successive
+        // overload adds one optional behavior on top of the previous one:
+        // exit-code enforcement, output encoding, kill-on-cancel, redirected
+        // STDIN, console-handler inheritance, keeping STDIN open, and
+        // elevated process priority.
+        Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            CancellationToken cancellationToken);
+
+        Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            CancellationToken cancellationToken);
+
+        Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            CancellationToken cancellationToken);
+
+        Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            bool killProcessOnCancel,
+            CancellationToken cancellationToken);
+
+        Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            bool killProcessOnCancel,
+            Channel<string> redirectStandardIn,
+            CancellationToken cancellationToken);
+
+        Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            bool killProcessOnCancel,
+            Channel<string> redirectStandardIn,
+            bool inheritConsoleHandler,
+            CancellationToken cancellationToken);
+
+        Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            bool killProcessOnCancel,
+            Channel<string> redirectStandardIn,
+            bool inheritConsoleHandler,
+            bool keepStandardInOpen,
+            CancellationToken cancellationToken);
+
+        Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            bool killProcessOnCancel,
+            Channel<string> redirectStandardIn,
+            bool inheritConsoleHandler,
+            bool keepStandardInOpen,
+            bool highPriorityProcess,
+            CancellationToken cancellationToken);
+    }
+
+ // The implementation of the process invoker does not hook up DataReceivedEvent and ErrorReceivedEvent of Process;
+ // instead, we read both the STDOUT and STDERR streams manually on separate threads.
+ // The reason is that we found a significant perf issue with process STDOUT/STDERR when using those events.
+ //
+ // Missing functionality:
+ // 1. Cancel/kill the process tree
+ // 2. Ensure STDOUT and STDERR are not processed out of order
+    public sealed class ProcessInvokerWrapper : RunnerService, IProcessInvoker
+    {
+        // Underlying SDK invoker that performs the actual process management;
+        // created in Initialize so it can share this service's tracing.
+        private ProcessInvoker _invoker;
+
+        public override void Initialize(IHostContext hostContext)
+        {
+            base.Initialize(hostContext);
+            _invoker = new ProcessInvoker(Trace);
+        }
+
+        // Forwarded line-by-line STDOUT/STDERR notifications from the child process.
+        public event EventHandler<ProcessDataReceivedEventArgs> OutputDataReceived;
+        public event EventHandler<ProcessDataReceivedEventArgs> ErrorDataReceived;
+
+        // Convenience overload: runs the process without requiring a zero exit code.
+        // Returns the process exit code.
+        public Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            CancellationToken cancellationToken)
+        {
+            return ExecuteAsync(
+                workingDirectory: workingDirectory,
+                fileName: fileName,
+                arguments: arguments,
+                environment: environment,
+                requireExitCodeZero: false,
+                cancellationToken: cancellationToken);
+        }
+
+        // Convenience overload: uses the default output encoding.
+        // Returns the process exit code.
+        public Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            CancellationToken cancellationToken)
+        {
+            return ExecuteAsync(
+                workingDirectory: workingDirectory,
+                fileName: fileName,
+                arguments: arguments,
+                environment: environment,
+                requireExitCodeZero: requireExitCodeZero,
+                outputEncoding: null,
+                cancellationToken: cancellationToken);
+        }
+
+        // Convenience overload: does not kill the process on cancellation.
+        // Returns the process exit code.
+        public Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            CancellationToken cancellationToken)
+        {
+            return ExecuteAsync(
+                workingDirectory: workingDirectory,
+                fileName: fileName,
+                arguments: arguments,
+                environment: environment,
+                requireExitCodeZero: requireExitCodeZero,
+                outputEncoding: outputEncoding,
+                killProcessOnCancel: false,
+                cancellationToken: cancellationToken);
+        }
+
+        // Convenience overload: does not redirect standard input.
+        // Returns the process exit code.
+        public Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            bool killProcessOnCancel,
+            CancellationToken cancellationToken)
+        {
+            return ExecuteAsync(
+                workingDirectory: workingDirectory,
+                fileName: fileName,
+                arguments: arguments,
+                environment: environment,
+                requireExitCodeZero: requireExitCodeZero,
+                outputEncoding: outputEncoding,
+                killProcessOnCancel: killProcessOnCancel,
+                redirectStandardIn: null,
+                cancellationToken: cancellationToken);
+        }
+
+        // Convenience overload: does not inherit the console handler.
+        // Returns the process exit code.
+        public Task<int> ExecuteAsync(
+            string workingDirectory,
+            string fileName,
+            string arguments,
+            IDictionary<string, string> environment,
+            bool requireExitCodeZero,
+            Encoding outputEncoding,
+            bool killProcessOnCancel,
+            Channel<string> redirectStandardIn,
+            CancellationToken cancellationToken)
+        {
+            return ExecuteAsync(
+                workingDirectory: workingDirectory,
+                fileName: fileName,
+                arguments: arguments,
+                environment: environment,
+                requireExitCodeZero: requireExitCodeZero,
+                outputEncoding: outputEncoding,
+                killProcessOnCancel: killProcessOnCancel,
+                redirectStandardIn: redirectStandardIn,
+                inheritConsoleHandler: false,
+                cancellationToken: cancellationToken);
+        }
+
+ public Task ExecuteAsync(
+ string workingDirectory,
+ string fileName,
+ string arguments,
+ IDictionary environment,
+ bool requireExitCodeZero,
+ Encoding outputEncoding,
+ bool killProcessOnCancel,
+ Channel