
Commit 99e71dec by Luke Bennett

Merge branch '10-4-stable-prepare-rc7' into '10-4-stable'

Prepare 10.4 RC7 release. See merge request gitlab-org/gitlab-ce!16519
parents 370a4654 31c28f21
......@@ -71,8 +71,7 @@ export default {
return sprintf(
_.escape(s__('ClusterIntegration|Prometheus is an open-source monitoring system with %{gitlabIntegrationLink} to monitor deployed applications.')), {
gitlabIntegrationLink: `<a href="https://docs.gitlab.com/ce/user/project/integrations/prometheus.html", target="_blank" rel="noopener noreferrer">
${_.escape(s__('ClusterIntegration|Gitlab Integration'))}
</a>`,
${_.escape(s__('ClusterIntegration|Gitlab Integration'))}</a>`,
},
false,
);
......
/* eslint-disable func-names, space-before-function-paren, no-var, prefer-rest-params, wrap-iife, quotes, no-underscore-dangle, one-var, one-var-declaration-per-line, consistent-return, dot-notation, quote-props, comma-dangle, object-shorthand, max-len, prefer-arrow-callback */
import 'vendor/jquery.waitforimages';
import { __ } from '~/locale';
import TaskList from './task_list';
import MergeRequestTabs from './merge_request_tabs';
import IssuablesHelper from './helpers/issuables_helper';
......@@ -110,24 +111,25 @@ MergeRequest.prototype.initCommitMessageListeners = function() {
});
};
MergeRequest.prototype.updateStatusText = function(classToRemove, classToAdd, newStatusText) {
MergeRequest.setStatusBoxToMerged = function() {
$('.detail-page-header .status-box')
.removeClass(classToRemove)
.addClass(classToAdd)
.removeClass('status-box-open')
.addClass('status-box-mr-merged')
.find('span')
.text(newStatusText);
.text(__('Merged'));
};
MergeRequest.prototype.decreaseCounter = function(by = 1) {
const $el = $('.nav-links .js-merge-counter');
MergeRequest.decreaseCounter = function(by = 1) {
const $el = $('.js-merge-counter');
const count = Math.max((parseInt($el.text().replace(/[^\d]/, ''), 10) - by), 0);
$el.text(addDelimiter(count));
};
MergeRequest.prototype.hideCloseButton = function() {
MergeRequest.hideCloseButton = function() {
const el = document.querySelector('.merge-request .js-issuable-actions');
const closeDropdownItem = el.querySelector('li.close-item');
if (closeDropdownItem) {
closeDropdownItem.classList.add('hidden');
// Selects the next dropdown item
......
import successSvg from 'icons/_icon_status_success.svg';
import warningSvg from 'icons/_icon_status_warning.svg';
import simplePoll from '~/lib/utils/simple_poll';
import MergeRequest from '../../../merge_request';
import Flash from '../../../flash';
import statusIcon from '../mr_widget_status_icon';
import eventHub from '../../event_hub';
......@@ -165,11 +166,9 @@ export default {
// If state is merged we should update the widget and stop the polling
eventHub.$emit('MRWidgetUpdateRequested');
eventHub.$emit('FetchActionsContent');
if (window.mergeRequest) {
window.mergeRequest.updateStatusText('status-box-open', 'status-box-merged', 'Merged');
window.mergeRequest.hideCloseButton();
window.mergeRequest.decreaseCounter();
}
MergeRequest.setStatusBoxToMerged();
MergeRequest.hideCloseButton();
MergeRequest.decreaseCounter();
stopPolling();
// If user checked remove source branch and we didn't remove the branch yet
......
......@@ -24,15 +24,13 @@
font-size: $gl-font-size;
line-height: 25px;
&.status-box-closed,
&.status-box-mr-closed {
background-color: $gl-danger;
}
&.status-box-issue-closed {
background-color: $gl-primary;
}
&.status-box-merged {
&.status-box-issue-closed,
&.status-box-mr-merged {
background-color: $gl-primary;
}
......
......@@ -146,6 +146,7 @@ module ApplicationSettingsHelper
:after_sign_up_text,
:akismet_api_key,
:akismet_enabled,
:authorized_keys_enabled,
:auto_devops_enabled,
:circuitbreaker_access_retries,
:circuitbreaker_check_interval,
......
......@@ -72,7 +72,7 @@ module IssuesHelper
if item.try(:expired?)
'status-box-expired'
elsif item.try(:merged?)
'status-box-merged'
'status-box-mr-merged'
elsif item.closed?
'status-box-mr-closed'
elsif item.try(:upcoming?)
......
......@@ -53,8 +53,16 @@ module TodosHelper
def todo_target_state_pill(todo)
return unless show_todo_state?(todo)
type =
case todo.target
when MergeRequest
'mr'
when Issue
'issue'
end
content_tag(:span, nil, class: 'target-status') do
content_tag(:span, nil, class: "status-box status-box-#{todo.target.state.dasherize}") do
content_tag(:span, nil, class: "status-box status-box-#{type}-#{todo.target.state.dasherize}") do
todo.target.state.capitalize
end
end
......
......@@ -261,6 +261,7 @@ class ApplicationSetting < ActiveRecord::Base
{
after_sign_up_text: nil,
akismet_enabled: false,
authorized_keys_enabled: true, # TODO default to false if the instance is configured to use AuthorizedKeysCommand
container_registry_token_expire_delay: 5,
default_artifacts_expire_in: '30 days',
default_branch_protection: Settings.gitlab['default_branch_protection'],
......
......@@ -8,7 +8,7 @@ class Route < ActiveRecord::Base
presence: true,
uniqueness: { case_sensitive: false }
validate :ensure_permanent_paths
validate :ensure_permanent_paths, if: :path_changed?
after_create :delete_conflicting_redirects
after_update :delete_conflicting_redirects, if: :path_changed?
......
......@@ -54,6 +54,7 @@ module MergeRequests
source_project_id: project.id,
source_branch: branch_name,
target_project_id: project.id,
target_branch: ref,
milestone_id: issue.milestone_id
}
end
......
......@@ -775,6 +775,22 @@
= link_to icon('question-circle'), help_page_path('administration/polling')
%fieldset
%legend Performance optimization
.form-group
.col-sm-offset-2.col-sm-10
.checkbox
= f.label :authorized_keys_enabled do
= f.check_box :authorized_keys_enabled
Write to "authorized_keys" file
.help-block
By default, we write to the "authorized_keys" file to support Git
over SSH without additional configuration. GitLab can be optimized
to authenticate SSH keys via the database. Only uncheck this
if you have configured your OpenSSH server to use the
AuthorizedKeysCommand. Click on the help icon for more details.
= link_to icon('question-circle'), help_page_path('administration/operations/fast_ssh_key_lookup')
%fieldset
%legend User and IP Rate Limits
.form-group
.col-sm-offset-2.col-sm-10
......
......@@ -31,11 +31,11 @@
- if current_user && can?(current_user, :fork_project, @project)
- if current_user.already_forked?(@project) && current_user.manageable_namespaces.size < 2
= link_to namespace_project_path(current_user, current_user.fork_of(@project)), title: 'Go to your fork', class: 'btn btn-new' do
= custom_icon('icon_fork')
= sprite_icon('fork', size: 12)
%span Fork
- else
= link_to new_project_fork_path(@project), title: "Fork project", class: 'btn btn-new' do
= custom_icon('icon_fork')
= sprite_icon('fork', size: 12)
%span Fork
......
......@@ -52,7 +52,7 @@
= render_project_pipeline_status(project.pipeline_status)
- if forks
%span.prepend-left-10
= sprite_icon('fork')
= sprite_icon('fork', size: 12)
= number_with_delimiter(project.forks_count)
- if stars
%span.prepend-left-10
......
module ProjectImportOptions
extend ActiveSupport::Concern
included do
IMPORT_RETRY_COUNT = 5
IMPORT_RETRY_COUNT = 5
included do
sidekiq_options retry: IMPORT_RETRY_COUNT, status_expiration: StuckImportJobsWorker::IMPORT_JOBS_EXPIRATION
# We only want to mark the project as failed once we exhausted all retries
......
---
title: Fix JavaScript bundle running on Cluster update/destroy pages
merge_request:
author:
type: fixed
---
title: Set target_branch to the ref branch when creating MR from issue
merge_request:
author:
type: fixed
---
title: Fix closed text for issues on Todos page
merge_request:
author:
type: fixed
---
title: Fix links to uploaded files on wiki pages
merge_request: 16499
author:
type: fixed
---
title: Fix giant fork icons on forks page
merge_request: 16474
author:
type: fixed
---
title: Backport fast database lookup of SSH authorized_keys from EE
merge_request: 16014
author:
type: added
---
title: Prevent RevList failing on non utf8 paths
merge_request: 16440
author:
type: fixed
---
title: Prevent invalid Route path if path is unchanged
merge_request: 16397
author:
type: fixed
---
title: Fix merge request status badge not updating after merging
merge_request:
author:
type: fixed
# rubocop:disable all
class AddFingerprintIndex < ActiveRecord::Migration
disable_ddl_transaction!
DOWNTIME = false
# https://gitlab.com/gitlab-org/gitlab-ee/issues/764
def change
args = [:keys, :fingerprint]
if Gitlab::Database.postgresql?
args << { algorithm: :concurrently }
end
add_index(*args) unless index_exists?(:keys, :fingerprint)
end
end
# See http://doc.gitlab.com/ce/development/migration_style_guide.html
# for more information on how to write migrations for GitLab.
class AddAuthorizedKeysEnabledToApplicationSettings < ActiveRecord::Migration
include Gitlab::Database::MigrationHelpers
# Set this constant to true if this migration requires downtime.
DOWNTIME = false
disable_ddl_transaction!
def up
add_column_with_default :application_settings, :authorized_keys_enabled, :boolean, default: true, allow_null: false
end
def down
remove_column :application_settings, :authorized_keys_enabled
end
end
......@@ -154,6 +154,7 @@ ActiveRecord::Schema.define(version: 20171230123729) do
t.integer "gitaly_timeout_default", default: 55, null: false
t.integer "gitaly_timeout_medium", default: 30, null: false
t.integer "gitaly_timeout_fast", default: 10, null: false
t.boolean "authorized_keys_enabled", default: true, null: false
end
create_table "audit_events", force: :cascade do |t|
......
# Fast lookup of authorized SSH keys in the database
Regular SSH operations become slow as the number of users grows because OpenSSH
searches for a key to authorize a user via a linear search. In the worst case,
such as when the user is not authorized to access GitLab, OpenSSH will scan the
entire file to search for a key. This can take significant time and disk I/O,
which will delay users attempting to push to or pull from a repository. Making
matters worse, if users add or remove keys frequently, the operating system may
not be able to cache the `authorized_keys` file, which causes the disk to be
accessed repeatedly.
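For context, each key that GitLab writes to `authorized_keys` is a forced-command entry along the lines of the following sketch (the path and key ID are illustrative for an Omnibus install, and the exact options may vary by version):

```
command="/opt/gitlab/embedded/service/gitlab-shell/bin/gitlab-shell key-123",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAA...
```

OpenSSH must compare the presented key against every such line until it finds a match, which is what makes the lookup linear.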
GitLab Shell solves this by providing a way to authorize SSH users via a fast,
indexed lookup in the GitLab database. This page describes how to enable the fast
lookup of authorized SSH keys.
> **Warning:** OpenSSH version 6.9+ is required because
`AuthorizedKeysCommand` must be able to accept a fingerprint. These
instructions will break installations using older versions of OpenSSH, such as
those included with CentOS 6 as of September 2017. If you want to use this
feature for CentOS 6, follow [the instructions on how to build and install a custom OpenSSH package](#compiling-a-custom-version-of-openssh-for-centos-6) before continuing.
## Setting up fast lookup via GitLab Shell
GitLab Shell provides a way to authorize SSH users via a fast, indexed lookup
in the GitLab database. GitLab Shell uses the fingerprint of the SSH key to
check whether the user is authorized to access GitLab.
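To see the MD5 fingerprint that will be used for the lookup of a given public key, you can use `ssh-keygen` (an illustration; the `-E md5` flag requires OpenSSH 6.8 or later, and the output shown is a placeholder):

```bash
ssh-keygen -lf ~/.ssh/id_rsa.pub -E md5
# 2048 MD5:3f:a2:ee:de:b5:de:53:c3:aa:2f:9c:45:24:4c:47:7b user@host (RSA)
```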
Create the directory `/opt/gitlab-shell` first:
```bash
sudo mkdir -p /opt/gitlab-shell
```
Create this file at `/opt/gitlab-shell/authorized_keys`:
```
#!/bin/bash
if [[ "$1" == "git" ]]; then
/opt/gitlab/embedded/service/gitlab-shell/bin/authorized_keys $2
fi
```
Set appropriate ownership and permissions:
```
sudo chown root:git /opt/gitlab-shell/authorized_keys
sudo chmod 0650 /opt/gitlab-shell/authorized_keys
```
Add the following to `/etc/ssh/sshd_config` or to `/assets/sshd_config` if you
are using Omnibus Docker:
```
AuthorizedKeysCommand /opt/gitlab-shell/authorized_keys %u %k
AuthorizedKeysCommandUser git
```
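Before reloading OpenSSH, you can optionally sanity-check the wrapper by hand. This is a hedged example: the base64 key portion is a placeholder, and it assumes the key already exists in your GitLab database. A key known to GitLab should print a single `command=...` line; an unknown key should print nothing:

```
sudo -u git /opt/gitlab-shell/authorized_keys git "AAAAB3NzaC1yc2EA...."
```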
Reload OpenSSH:
```bash
# Debian or Ubuntu installations
sudo service ssh reload
# CentOS installations
sudo service sshd reload
```
Confirm that SSH is working by removing your user's SSH key in the UI, adding a
new one, and attempting to pull a repo.
> **Warning:** Do not disable writes until SSH is confirmed to be working
perfectly because the file will quickly become out-of-date.
In the case of lookup failures (which are not uncommon), the `authorized_keys`
file will still be scanned, so Git SSH performance will remain slow for many
users as long as a large file exists.
You can disable further writes to the `authorized_keys` file by unchecking
`Write to "authorized_keys" file` in the Application Settings of your GitLab
installation.
![Write to authorized keys setting](img/write_to_authorized_keys_setting.png)
Again, confirm that SSH is working by removing your user's SSH key in the UI,
adding a new one, and attempting to pull a repo.
Then you can back up and delete your `authorized_keys` file for best performance.
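For example, on an Omnibus installation (the path is an assumption; adjust it for source installations):

```
sudo mv /var/opt/gitlab/.ssh/authorized_keys /var/opt/gitlab/.ssh/authorized_keys.$(date +%s).bak
```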
## How to go back to using the `authorized_keys` file
This is a brief overview. Please refer to the above instructions for more context.
1. [Rebuild the `authorized_keys` file](../raketasks/maintenance.md#rebuild-authorized_keys-file)
1. Enable writes to the `authorized_keys` file in Application Settings
1. Remove the `AuthorizedKeysCommand` lines from `/etc/ssh/sshd_config` or from `/assets/sshd_config` if you are using Omnibus Docker.
1. Reload sshd: `sudo service sshd reload`
1. Remove the `/opt/gitlab-shell/authorized_keys` file
## Compiling a custom version of OpenSSH for CentOS 6
Building a custom version of OpenSSH is not necessary for Ubuntu 16.04 users,
since Ubuntu 16.04 ships with OpenSSH 7.2.
It is also unnecessary for CentOS 7.4 users, as that version ships with
OpenSSH 7.4. If you are using CentOS 7.0 - 7.3, we strongly recommend that you
upgrade to CentOS 7.4 instead of following this procedure. This should be as
simple as running `yum update`.
CentOS 6 users must build their own OpenSSH package to enable SSH lookups via
the database. The following instructions can be used to build OpenSSH 7.5:
1. First, download the package and install the required packages:
```
sudo su -
cd /tmp
curl --remote-name https://mirrors.evowise.com/pub/OpenBSD/OpenSSH/portable/openssh-7.5p1.tar.gz
tar xzvf openssh-7.5p1.tar.gz
yum install rpm-build gcc make wget openssl-devel krb5-devel pam-devel libX11-devel xmkmf libXt-devel
```
2. Prepare the build by copying files to the right place:
```
mkdir -p /root/rpmbuild/{SOURCES,SPECS}
cp ./openssh-7.5p1/contrib/redhat/openssh.spec /root/rpmbuild/SPECS/
cp openssh-7.5p1.tar.gz /root/rpmbuild/SOURCES/
cd /root/rpmbuild/SPECS
```
3. Next, set the spec settings properly:
```
sed -i -e "s/%define no_gnome_askpass 0/%define no_gnome_askpass 1/g" openssh.spec
sed -i -e "s/%define no_x11_askpass 0/%define no_x11_askpass 1/g" openssh.spec
sed -i -e "s/BuildPreReq/BuildRequires/g" openssh.spec
```
4. Build the RPMs:
```
rpmbuild -bb openssh.spec
```
5. Ensure the RPMs were built:
```
ls -al /root/rpmbuild/RPMS/x86_64/
```
You should see something like the following:
```
total 1324
drwxr-xr-x. 2 root root 4096 Jun 20 19:37 .
drwxr-xr-x. 3 root root 19 Jun 20 19:37 ..
-rw-r--r--. 1 root root 470828 Jun 20 19:37 openssh-7.5p1-1.x86_64.rpm
-rw-r--r--. 1 root root 490716 Jun 20 19:37 openssh-clients-7.5p1-1.x86_64.rpm
-rw-r--r--. 1 root root 17020 Jun 20 19:37 openssh-debuginfo-7.5p1-1.x86_64.rpm
-rw-r--r--. 1 root root 367516 Jun 20 19:37 openssh-server-7.5p1-1.x86_64.rpm
```
6. Install the packages. The OpenSSH packages will replace `/etc/pam.d/sshd`
with their own version, which may prevent users from logging in, so be sure
that the file is backed up and restored after installation:
```
timestamp=$(date +%s)
cp /etc/pam.d/sshd pam-ssh-conf-$timestamp
rpm -Uvh /root/rpmbuild/RPMS/x86_64/*.rpm
yes | cp pam-ssh-conf-$timestamp /etc/pam.d/sshd
```
7. Verify the installed version. In another window, attempt to log in to the server:
```
ssh -v <your-centos-machine>
```
You should see a line that reads: `debug1: Remote protocol version 2.0, remote software version OpenSSH_7.5`.
If not, you may need to restart sshd (e.g. `systemctl restart sshd.service`).
8. *IMPORTANT!* Open a new SSH session to your server before exiting to make
sure everything is working! If you need to downgrade, simply install the
older package:
```
# Only run this if you run into a problem logging in
yum downgrade openssh-server openssh openssh-clients
```
......@@ -13,4 +13,5 @@ by GitLab to another file system or another server.
that to prioritize important jobs.
- [Sidekiq MemoryKiller](sidekiq_memory_killer.md): Configure Sidekiq MemoryKiller
to restart Sidekiq.
- [Unicorn](unicorn.md): Understand Unicorn and unicorn-worker-killer.
\ No newline at end of file
- [Unicorn](unicorn.md): Understand Unicorn and unicorn-worker-killer.
- [Speed up SSH operations](fast_ssh_key_lookup.md): Authorize SSH users via a fast, indexed lookup to the GitLab database.
This document was moved to [another location](fast_ssh_key_lookup.md).
......@@ -54,13 +54,13 @@ Apart from those, here is a collection of tutorials and guides on setting up yo
- [Analyze code quality with the Code Climate CLI](code_climate.md)
### Other
### Static Application Security Testing (SAST)
- [Using `dpl` as deployment tool](deployment/README.md)
- [Repositories with examples for various languages](https://gitlab.com/groups/gitlab-examples)
- [The .gitlab-ci.yml file for GitLab itself](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/.gitlab-ci.yml)
- [Continuous Deployment with GitLab: how to build and deploy a Debian Package with GitLab CI](https://about.gitlab.com/2016/10/12/automated-debian-package-build-with-gitlab-ci/)
- [How to deploy Maven projects to Artifactory with GitLab CI/CD](artifactory_and_gitlab/index.md)
- **(EEU)** [Scan your code for vulnerabilities](https://docs.gitlab.com/ee/ci/examples/sast.html)
- [Scan your Docker images for vulnerabilities](sast_docker.md)
### Dynamic Application Security Testing (DAST)
Scan your app for vulnerabilities with GitLab [Dynamic Application Security Testing (DAST)](dast.md).
### Browser Performance Testing with Sitespeed.io
Analyze your [browser performance with Sitespeed.io](browser_performance.md).
### GitLab CI/CD for Review Apps
## GitLab CI/CD for GitLab Pages
......
# Dynamic Application Security Testing with GitLab CI/CD
This example shows how to run
[Dynamic Application Security Testing (DAST)](https://en.wikipedia.org/wiki/Dynamic_program_analysis)
on your project's source code by using GitLab CI/CD.
DAST uses the popular open source tool
[OWASP ZAProxy](https://github.com/zaproxy/zaproxy) to perform an analysis.
All you need is a GitLab Runner with the Docker executor (the shared Runners on
GitLab.com will work fine). You can then add a new job to `.gitlab-ci.yml`,
called `dast`:
```yaml
dast:
image: owasp/zap2docker-stable
script:
- mkdir /zap/wrk/
- /zap/zap-baseline.py -J gl-dast-report.json -t https://example.com || true
- cp /zap/wrk/gl-dast-report.json .
artifacts:
paths: [gl-dast-report.json]
```
The above example will create a `dast` job in your CI pipeline and will allow
you to download and analyze the report artifact in JSON format.
TIP: **Tip:**
Starting with [GitLab Enterprise Edition Ultimate][ee] 10.4, this information will
be automatically extracted and shown right in the merge request widget. To do
so, the CI job must be named `dast` and the artifact path must be
`gl-dast-report.json`.
[Learn more about dynamic application security testing results shown in merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/dast.html).
[ee]: https://about.gitlab.com/gitlab-ee/
# Static Application Security Testing for Docker containers with GitLab CI/CD
You can check your Docker images (or, more precisely, the containers) for known
vulnerabilities by using [Clair](https://github.com/coreos/clair) and
[clair-scanner](https://github.com/arminc/clair-scanner), two open source tools
for vulnerability static analysis of containers.
All you need is a GitLab Runner with the Docker executor (the shared Runners on
GitLab.com will work fine). You can then add a new job to `.gitlab-ci.yml`,
called `sast:container`:
```yaml
sast:container:
image: docker:latest
variables:
DOCKER_DRIVER: overlay2
## Define two new variables based on GitLab's CI/CD predefined variables
## https://docs.gitlab.com/ee/ci/variables/#predefined-variables-environment-variables
CI_APPLICATION_REPOSITORY: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
CI_APPLICATION_TAG: $CI_COMMIT_SHA
allow_failure: true
services:
- docker:dind
script:
- docker run -d --name db arminc/clair-db:latest
- docker run -p 6060:6060 --link db:postgres -d --name clair arminc/clair-local-scan:v2.0.1
- apk add -U wget ca-certificates
- docker pull ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG}
- wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
- mv clair-scanner_linux_amd64 clair-scanner
- chmod +x clair-scanner
- touch clair-whitelist.yml
- ./clair-scanner -c http://docker:6060 --ip $(hostname -i) -r gl-sast-container-report.json -l clair.log -w clair-whitelist.yml ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG} || true
artifacts:
paths: [gl-sast-container-report.json]
```
The above example will create a `sast:container` job in your CI/CD pipeline, pull
the image from the [Container Registry](../../user/project/container_registry.md)
(whose name is defined from the two `CI_APPLICATION_` variables) and scan it
for possible vulnerabilities. The report will be saved as an artifact that you
can later download and analyze.
If you want to whitelist some specific vulnerabilities, you can do so by defining
them in a [YAML file](https://github.com/arminc/clair-scanner/blob/master/README.md#example-whitelist-yaml-file);
in our case it's named `clair-whitelist.yml`.
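A minimal sketch of that whitelist, following the format in the clair-scanner README (the CVE IDs and justifications below are placeholders, not recommendations):

```yaml
generalwhitelist:
  # Approve a CVE for any image
  CVE-2017-6055: "accepted risk: affected component not exposed"
images:
  alpine:
    # Approve a CVE only for alpine-based images
    CVE-2017-3261: "fixed in a newer base image"
```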
TIP: **Tip:**
Starting with [GitLab Enterprise Edition Ultimate][ee] 10.4, this information will
be automatically extracted and shown right in the merge request widget. To do
so, the CI/CD job must be named `sast:container` and the artifact path must be
`gl-sast-container-report.json`.
[Learn more about application security testing results shown in merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/sast_docker.html).
[ee]: https://about.gitlab.com/gitlab-ee/
......@@ -20,6 +20,8 @@ project in an easy and automatic way:
1. [Auto Test](#auto-test)
1. [Auto Code Quality](#auto-code-quality)
1. [Auto SAST (Static Application Security Testing)](#auto-sast)
1. [Auto SAST for Docker images](#auto-sast-for-docker-images)
1. [Auto DAST (Dynamic Application Security Testing)](#auto-dast)
1. [Auto Browser Performance Testing](#auto-browser-performance-testing)
1. [Auto Review Apps](#auto-review-apps)
1. [Auto Deploy](#auto-deploy)
......@@ -193,8 +195,10 @@ Auto Code Quality uses the open source
[`codeclimate` image](https://hub.docker.com/r/codeclimate/codeclimate/) to run
static analysis and other code checks on the current code. The report is
created, and is uploaded as an artifact which you can later download and check
out. In GitLab Enterprise Edition Starter, differences between the source and
target branches are
out.
In GitLab Enterprise Edition Starter, differences between the source and
target branches are also
[shown in the merge request widget](https://docs.gitlab.com/ee/user/project/merge_requests/code_quality_diff.html).
### Auto SAST
......@@ -207,7 +211,34 @@ analysis on the current code and checks for potential security issues. Once the
report is created, it's uploaded as an artifact which you can later download and
check out.
Any security warnings are also [shown in the merge request widget](https://docs.gitlab.com/ee/user/project/merge_requests/sast.html).
In GitLab Enterprise Edition Ultimate, any security warnings are also
[shown in the merge request widget](https://docs.gitlab.com/ee/user/project/merge_requests/sast.html).
### Auto SAST for Docker images
> Introduced in GitLab 10.4.
Vulnerability Static Analysis for containers uses
[Clair](https://github.com/coreos/clair) to run static analysis on a
Docker image and checks for potential security issues. Once the report is
created, it's uploaded as an artifact which you can later download and
check out.
In GitLab Enterprise Edition Ultimate, any security warnings are also
[shown in the merge request widget](https://docs.gitlab.com/ee/user/project/merge_requests/sast_docker.html).
### Auto DAST
> Introduced in [GitLab Enterprise Edition Ultimate][ee] 10.4.
Dynamic Application Security Testing (DAST) uses the
popular open source tool [OWASP ZAProxy](https://github.com/zaproxy/zaproxy)
to perform an analysis on the current code and checks for potential security
issues. Once the report is created, it's uploaded as an artifact which you can
later download and check out.
In GitLab Enterprise Edition Ultimate, any security warnings are also
[shown in the merge request widget](https://docs.gitlab.com/ee/user/project/merge_requests/dast.html).
### Auto Browser Performance Testing
......
......@@ -82,6 +82,18 @@ module API
end
#
# Get a ssh key using the fingerprint
#
get "/authorized_keys" do
fingerprint = params.fetch(:fingerprint) do
Gitlab::InsecureKeyFingerprint.new(params.fetch(:key)).fingerprint
end
key = Key.find_by(fingerprint: fingerprint)
not_found!("Key") if key.nil?
present key, with: Entities::SSHKey
end
#
# Discover user by ssh key or user id
#
get "/discover" do
......
......@@ -38,6 +38,7 @@ module API
builds = user_project.builds.order('id DESC')
builds = filter_builds(builds, params[:scope])
builds = builds.preload(:user, :job_artifacts_archive, :runner, pipeline: :project)
present paginate(builds), with: Entities::Job
end
......
......@@ -36,6 +36,7 @@ module API
builds = user_project.builds.order('id DESC')
builds = filter_builds(builds, params[:scope])
builds = builds.preload(:user, :job_artifacts_archive, :runner, pipeline: :project)
present paginate(builds), with: ::API::V3::Entities::Build
end
......
......@@ -9,6 +9,10 @@ module Banzai
end
def apply_rules
# Special case: relative URLs beginning with `/uploads/` refer to
# user-uploaded files and will be handled elsewhere.
return @uri.to_s if @uri.relative? && @uri.path.starts_with?('/uploads/')
apply_file_link_rules!
apply_hierarchical_link_rules!
apply_relative_link_rules!
......
......@@ -95,7 +95,7 @@ module Gitlab
object_output.map do |output_line|
sha, path = output_line.split(' ', 2)
next if require_path && path.blank?
next if require_path && path.to_s.empty?
sha
end.reject(&:nil?)
......
module Gitlab
#
# Calculates the fingerprint of a given key without using
# openssh key validations. For this reason, only use
# for calculating the fingerprint to find the key with it.
#
# DO NOT use it for checking the validity of a ssh key.
#
class InsecureKeyFingerprint
attr_accessor :key
#
# Gets the base64 encoded string representing a rsa or dsa key
#
def initialize(key_base64)
@key = key_base64
end
def fingerprint
OpenSSL::Digest::MD5.hexdigest(Base64.decode64(@key)).scan(/../).join(':')
end
end
end
......@@ -36,7 +36,11 @@ module Gitlab
def complete_command(namespace_name)
return unless chart
"helm install #{chart} --name #{name} --namespace #{namespace_name} >/dev/null"
if chart_values_file
"helm install #{chart} --name #{name} --namespace #{namespace_name} -f /data/helm/#{name}/config/values.yaml >/dev/null"
else
"helm install #{chart} --name #{name} --namespace #{namespace_name} >/dev/null"
end
end
def install_dps_command
......
......@@ -10,9 +10,10 @@ module Gitlab
def generate
spec = { containers: [container_specification], restartPolicy: 'Never' }
if command.chart_values_file
generate_config_map
spec['volumes'] = volumes_specification
create_config_map
spec[:volumes] = volumes_specification
end
::Kubeclient::Resource.new(metadata: metadata, spec: spec)
end
......@@ -34,19 +35,39 @@ module Gitlab
end
def labels
{ 'gitlab.org/action': 'install', 'gitlab.org/application': command.name }
{
'gitlab.org/action': 'install',
'gitlab.org/application': command.name
}
end
def metadata
{ name: command.pod_name, namespace: namespace_name, labels: labels }
{
name: command.pod_name,
namespace: namespace_name,
labels: labels
}
end
def volume_mounts_specification
[{ name: 'config-volume', mountPath: '/etc/config' }]
[
{
name: 'configuration-volume',
mountPath: "/data/helm/#{command.name}/config"
}
]
end
def volumes_specification
[{ name: 'config-volume', configMap: { name: 'values-config' } }]
[
{
name: 'configuration-volume',
configMap: {
name: 'values-content-configuration',
items: [{ key: 'values', path: 'values.yaml' }]
}
}
]
end
def generate_pod_env(command)
......@@ -57,10 +78,10 @@ module Gitlab
}.map { |key, value| { name: key, value: value } }
end
def generate_config_map
def create_config_map
resource = ::Kubeclient::Resource.new
resource.metadata = { name: 'values-config', namespace: namespace_name }
resource.data = YAML.load_file(command.chart_values_file)
resource.metadata = { name: 'values-content-configuration', namespace: namespace_name, labels: { name: 'values-content-configuration' } }
resource.data = { values: File.read(command.chart_values_file) }
kubeclient.create_config_map(resource)
end
end
......
......@@ -183,6 +183,8 @@ module Gitlab
# add_key("key-42", "sha-rsa ...")
#
def add_key(key_id, key_content)
return unless self.authorized_keys_enabled?
gitlab_shell_fast_execute([gitlab_shell_keys_path,
'add-key', key_id, self.class.strip_key(key_content)])
end
......@@ -192,6 +194,8 @@ module Gitlab
# Ex.
# batch_add_keys { |adder| adder.add_key("key-42", "sha-rsa ...") }
def batch_add_keys(&block)
return unless self.authorized_keys_enabled?
IO.popen(%W(#{gitlab_shell_path}/bin/gitlab-keys batch-add-keys), 'w') do |io|
yield(KeyAdder.new(io))
end
......@@ -202,10 +206,11 @@ module Gitlab
# Ex.
# remove_key("key-342", "sha-rsa ...")
#
def remove_key(key_id, key_content)
def remove_key(key_id, key_content = nil)
return unless self.authorized_keys_enabled?
args = [gitlab_shell_keys_path, 'rm-key', key_id]
args << key_content if key_content
gitlab_shell_fast_execute(args)
end
......@@ -215,9 +220,62 @@ module Gitlab
# remove_all_keys
#
def remove_all_keys
return unless self.authorized_keys_enabled?
gitlab_shell_fast_execute([gitlab_shell_keys_path, 'clear'])
end
# Remove ssh keys from gitlab shell that are not in the DB
#
# Ex.
# remove_keys_not_found_in_db
#
def remove_keys_not_found_in_db
return unless self.authorized_keys_enabled?
Rails.logger.info("Removing keys not found in DB")
batch_read_key_ids do |ids_in_file|
ids_in_file.uniq!
keys_in_db = Key.where(id: ids_in_file)
next unless ids_in_file.size > keys_in_db.count # optimization
ids_to_remove = ids_in_file - keys_in_db.pluck(:id)
ids_to_remove.each do |id|
Rails.logger.info("Removing key-#{id} not found in DB")
remove_key("key-#{id}")
end
end
end
# Iterate over all ssh key IDs from gitlab shell, in batches
#
# Ex.
# batch_read_key_ids { |batch| keys = Key.where(id: batch) }
#
def batch_read_key_ids(batch_size: 100, &block)
return unless self.authorized_keys_enabled?
list_key_ids do |key_id_stream|
key_id_stream.lazy.each_slice(batch_size) do |lines|
key_ids = lines.map { |l| l.chomp.to_i }
yield(key_ids)
end
end
end
# Stream all ssh key IDs from gitlab shell, separated by newlines
#
# Ex.
# list_key_ids
#
def list_key_ids(&block)
return unless self.authorized_keys_enabled?
IO.popen(%W(#{gitlab_shell_path}/bin/gitlab-keys list-key-ids), &block)
end
# Add empty directory for storing repositories
#
# Ex.
......@@ -333,6 +391,14 @@ module Gitlab
File.join(gitlab_shell_path, 'bin', 'gitlab-keys')
end
def authorized_keys_enabled?
# Return true if nil to ensure the authorized_keys methods work while
# fixing the authorized_keys file during migration.
return true if Gitlab::CurrentSettings.current_application_settings.authorized_keys_enabled.nil?
Gitlab::CurrentSettings.current_application_settings.authorized_keys_enabled
end
private
def gitlab_projects(shard_path, disk_path)
......
FactoryBot.define do
factory :redirect_route do
sequence(:path) { |n| "redirect#{n}" }
source factory: :group
permanent false
trait :permanent do
permanent true
end
trait :temporary do
permanent false
end
end
end
......@@ -63,8 +63,8 @@ import IssuablesHelper from '~/helpers/issuables_helper';
beforeEach(() => {
loadFixtures('merge_requests/merge_request_with_task_list.html.raw');
this.el = document.querySelector('.js-issuable-actions');
const merge = new MergeRequest();
merge.hideCloseButton();
new MergeRequest(); // eslint-disable-line no-new
MergeRequest.hideCloseButton();
});
it('hides the dropdown close item and selects the next item', () => {
......@@ -83,8 +83,7 @@ import IssuablesHelper from '~/helpers/issuables_helper';
beforeEach(() => {
loadFixtures('merge_requests/merge_request_of_current_user.html.raw');
this.el = document.querySelector('.js-issuable-actions');
const merge = new MergeRequest();
merge.hideCloseButton();
MergeRequest.hideCloseButton();
});
it('hides the close button', () => {
......
......@@ -371,6 +371,10 @@ describe('MRWidgetReadyToMerge', () => {
});
});
beforeEach(() => {
loadFixtures('merge_requests/merge_request_of_current_user.html.raw');
});
it('should call start and stop polling when MR merged', (done) => {
spyOn(eventHub, '$emit');
spyOn(vm.service, 'poll').and.returnValue(returnPromise('merged'));
......@@ -392,6 +396,47 @@ describe('MRWidgetReadyToMerge', () => {
}, 333);
});
it('updates status box', (done) => {
spyOn(vm.service, 'poll').and.returnValue(returnPromise('merged'));
spyOn(vm, 'initiateRemoveSourceBranchPolling');
vm.handleMergePolling(() => {}, () => {});
setTimeout(() => {
const statusBox = document.querySelector('.status-box');
expect(statusBox.classList.contains('status-box-mr-merged')).toBeTruthy();
expect(statusBox.textContent).toContain('Merged');
done();
});
});
it('hides close button', (done) => {
spyOn(vm.service, 'poll').and.returnValue(returnPromise('merged'));
spyOn(vm, 'initiateRemoveSourceBranchPolling');
vm.handleMergePolling(() => {}, () => {});
setTimeout(() => {
expect(document.querySelector('.btn-close').classList.contains('hidden')).toBeTruthy();
done();
});
});
it('updates merge request count badge', (done) => {
spyOn(vm.service, 'poll').and.returnValue(returnPromise('merged'));
spyOn(vm, 'initiateRemoveSourceBranchPolling');
vm.handleMergePolling(() => {}, () => {});
setTimeout(() => {
expect(document.querySelector('.js-merge-counter').textContent).toBe('0');
done();
});
});
it('should continue polling until MR is merged', (done) => {
spyOn(vm.service, 'poll').and.returnValue(returnPromise('some_other_state'));
spyOn(vm, 'initiateRemoveSourceBranchPolling');
......
......@@ -10,15 +10,23 @@ describe Banzai::Filter::WikiLinkFilter do
it "doesn't rewrite absolute links" do
filtered_link = filter("<a href='http://example.com:8000/'>Link</a>", project_wiki: wiki).children[0]
expect(filtered_link.attribute('href').value).to eq('http://example.com:8000/')
end
it "doesn't rewrite links to project uploads" do
filtered_link = filter("<a href='/uploads/a.test'>Link</a>", project_wiki: wiki).children[0]
expect(filtered_link.attribute('href').value).to eq('/uploads/a.test')
end
describe "invalid links" do
invalid_links = ["http://:8080", "http://", "http://:8080/path"]
invalid_links.each do |invalid_link|
it "doesn't rewrite invalid invalid_links like #{invalid_link}" do
filtered_link = filter("<a href='#{invalid_link}'>Link</a>", project_wiki: wiki).children[0]
expect(filtered_link.attribute('href').value).to eq(invalid_link)
end
end
......
......@@ -39,7 +39,7 @@ describe Gitlab::Git::RevList do
]
expect(rev_list).to receive(:popen).with(*params) do |*_, lazy_block:|
lazy_block.call(output.split("\n").lazy)
lazy_block.call(output.lines.lazy.map(&:chomp))
end
end
......@@ -64,6 +64,15 @@ describe Gitlab::Git::RevList do
expect(rev_list.new_objects(require_path: true)).to eq(%w[sha2])
end
it 'can handle non utf-8 paths' do
non_utf_char = [0x89].pack("c*").force_encoding("UTF-8")
stub_lazy_popen_rev_list('newrev', '--not', '--all', '--objects', output: "sha2 πå†h/†ø/ƒîlé#{non_utf_char}\nsha1")
rev_list.new_objects(require_path: true) do |object_ids|
expect(object_ids.force).to eq(%w[sha2])
end
end
it 'can yield a lazy enumerator' do
stub_lazy_popen_rev_list('newrev', '--not', '--all', '--objects', output: "sha1\nsha2")
......
require 'spec_helper'
describe Gitlab::InsecureKeyFingerprint do
let(:key) do
'ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn' \
'1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qk' \
'r8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMg' \
'Jw0='
end
let(:fingerprint) { "3f:a2:ee:de:b5:de:53:c3:aa:2f:9c:45:24:4c:47:7b" }
describe "#fingerprint" do
it "generates the key's fingerprint" do
expect(described_class.new(key.split[1]).fingerprint).to eq(fingerprint)
end
end
end
......@@ -100,6 +100,25 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do
is_expected.to eq(command)
end
end
context 'when chart values file is present' do
let(:install_command) { described_class.new(prometheus.name, chart: prometheus.chart, chart_values_file: prometheus.chart_values_file) }
let(:command) do
<<~MSG.chomp
set -eo pipefail
apk add -U ca-certificates openssl >/dev/null
wget -q -O - https://kubernetes-helm.storage.googleapis.com/helm-v2.7.0-linux-amd64.tar.gz | tar zxC /tmp >/dev/null
mv /tmp/linux-amd64/helm /usr/bin/
helm init --client-only >/dev/null
helm install #{prometheus.chart} --name #{prometheus.name} --namespace #{namespace.name} -f /data/helm/#{prometheus.name}/config/values.yaml >/dev/null
MSG
end
it 'should return appropriate command' do
is_expected.to eq(command)
end
end
end
describe "#pod_name" do
......
......@@ -52,18 +52,20 @@ describe Gitlab::Kubernetes::Helm::Pod do
it 'should include volumes for the container' do
container = subject.generate.spec.containers.first
expect(container.volumeMounts.first['name']).to eq('config-volume')
expect(container.volumeMounts.first['mountPath']).to eq('/etc/config')
expect(container.volumeMounts.first['name']).to eq('configuration-volume')
expect(container.volumeMounts.first['mountPath']).to eq("/data/helm/#{app.name}/config")
end
it 'should include a volume inside the specification' do
spec = subject.generate.spec
expect(spec.volumes.first['name']).to eq('config-volume')
expect(spec.volumes.first['name']).to eq('configuration-volume')
end
it 'should mount configMap specification in the volume' do
spec = subject.generate.spec
expect(spec.volumes.first.configMap['name']).to eq('values-config')
expect(spec.volumes.first.configMap['name']).to eq('values-content-configuration')
expect(spec.volumes.first.configMap['items'].first['key']).to eq('values')
expect(spec.volumes.first.configMap['items'].first['path']).to eq('values.yaml')
end
end
......
......@@ -16,6 +16,66 @@ describe Route do
it { is_expected.to validate_presence_of(:source) }
it { is_expected.to validate_presence_of(:path) }
it { is_expected.to validate_uniqueness_of(:path).case_insensitive }
describe '#ensure_permanent_paths' do
context 'when the route is not yet persisted' do
let(:new_route) { described_class.new(path: 'foo', source: build(:group)) }
context 'when permanent conflicting redirects exist' do
it 'is invalid' do
redirect = build(:redirect_route, :permanent, path: 'foo/bar/baz')
redirect.save!(validate: false)
expect(new_route.valid?).to be_falsey
expect(new_route.errors.first[1]).to eq('foo has been taken before. Please use another one')
end
end
context 'when no permanent conflicting redirects exist' do
it 'is valid' do
expect(new_route.valid?).to be_truthy
end
end
end
context 'when path has changed' do
before do
route.path = 'foo'
end
context 'when permanent conflicting redirects exist' do
it 'is invalid' do
redirect = build(:redirect_route, :permanent, path: 'foo/bar/baz')
redirect.save!(validate: false)
expect(route.valid?).to be_falsey
expect(route.errors.first[1]).to eq('foo has been taken before. Please use another one')
end
end
context 'when no permanent conflicting redirects exist' do
it 'is valid' do
expect(route.valid?).to be_truthy
end
end
end
context 'when path has not changed' do
context 'when permanent conflicting redirects exist' do
it 'is valid' do
redirect = build(:redirect_route, :permanent, path: 'git_lab/foo/bar')
redirect.save!(validate: false)
expect(route.valid?).to be_truthy
end
end
context 'when no permanent conflicting redirects exist' do
it 'is valid' do
expect(route.valid?).to be_truthy
end
end
end
end
end
describe 'callbacks' do
......
......@@ -192,6 +192,54 @@ describe API::Internal do
end
end
describe "GET /internal/authorized_keys" do
context "using an existing key's fingerprint" do
it "finds the key" do
get(api('/internal/authorized_keys'), fingerprint: key.fingerprint, secret_token: secret_token)
expect(response.status).to eq(200)
expect(json_response["key"]).to eq(key.key)
end
end
context "non existing key's fingerprint" do
it "returns 404" do
get(api('/internal/authorized_keys'), fingerprint: "no:t-:va:li:d0", secret_token: secret_token)
expect(response.status).to eq(404)
end
end
context "using a partial fingerprint" do
it "returns 404" do
get(api('/internal/authorized_keys'), fingerprint: "#{key.fingerprint[0..5]}%", secret_token: secret_token)
expect(response.status).to eq(404)
end
end
context "sending the key" do
it "finds the key" do
get(api('/internal/authorized_keys'), key: key.key.split[1], secret_token: secret_token)
expect(response.status).to eq(200)
expect(json_response["key"]).to eq(key.key)
end
it "returns 404 with a partial key" do
get(api('/internal/authorized_keys'), key: key.key.split[1][0...-3], secret_token: secret_token)
expect(response.status).to eq(404)
end
it "returns 404 with an not valid base64 string" do
get(api('/internal/authorized_keys'), key: "whatever!", secret_token: secret_token)
expect(response.status).to eq(404)
end
end
end
describe "POST /internal/allowed", :clean_gitlab_redis_shared_state do
context "access granted" do
around do |example|
......
......@@ -25,8 +25,10 @@ describe API::Jobs do
describe 'GET /projects/:id/jobs' do
let(:query) { Hash.new }
before do
get api("/projects/#{project.id}/jobs", api_user), query
before do |example|
unless example.metadata[:skip_before_request]
get api("/projects/#{project.id}/jobs", api_user), query
end
end
context 'authorized user' do
......@@ -51,6 +53,23 @@ describe API::Jobs do
expect(json_job['pipeline']['status']).to eq job.pipeline.status
end
it 'avoids N+1 queries', skip_before_request: true do
first_build = create(:ci_build, :artifacts, pipeline: pipeline)
first_build.runner = create(:ci_runner)
first_build.user = create(:user)
first_build.save
control_count = ActiveRecord::QueryRecorder.new { go }.count
second_pipeline = create(:ci_empty_pipeline, project: project, sha: project.commit.id, ref: project.default_branch)
second_build = create(:ci_build, :artifacts, pipeline: second_pipeline)
second_build.runner = create(:ci_runner)
second_build.user = create(:user)
second_build.save
expect { go }.not_to exceed_query_limit(control_count)
end
context 'filter project with one scope element' do
let(:query) { { 'scope' => 'pending' } }
......@@ -83,6 +102,10 @@ describe API::Jobs do
expect(response).to have_gitlab_http_status(401)
end
end
def go
get api("/projects/#{project.id}/jobs", api_user), query
end
end
describe 'GET /projects/:id/pipelines/:pipeline_id/jobs' do
......
......@@ -13,10 +13,12 @@ describe API::V3::Builds do
describe 'GET /projects/:id/builds ' do
let(:query) { '' }
before do
before do |example|
create(:ci_build, :skipped, pipeline: pipeline)
get v3_api("/projects/#{project.id}/builds?#{query}", api_user)
unless example.metadata[:skip_before_request]
get v3_api("/projects/#{project.id}/builds?#{query}", api_user)
end
end
context 'authorized user' do
......@@ -40,6 +42,23 @@ describe API::V3::Builds do
expect(json_build['pipeline']['status']).to eq build.pipeline.status
end
it 'avoids N+1 queries', skip_before_request: true do
first_build = create(:ci_build, :artifacts, pipeline: pipeline)
first_build.runner = create(:ci_runner)
first_build.user = create(:user)
first_build.save
control_count = ActiveRecord::QueryRecorder.new { go }.count
second_pipeline = create(:ci_empty_pipeline, project: project, sha: project.commit.id, ref: project.default_branch)
second_build = create(:ci_build, :artifacts, pipeline: second_pipeline)
second_build.runner = create(:ci_runner)
second_build.user = create(:user)
second_build.save
expect { go }.not_to exceed_query_limit(control_count)
end
context 'filter project with one scope element' do
let(:query) { 'scope=pending' }
......@@ -84,6 +103,10 @@ describe API::V3::Builds do
expect(response).to have_gitlab_http_status(401)
end
end
def go
get v3_api("/projects/#{project.id}/builds?#{query}", api_user)
end
end
describe 'GET /projects/:id/repository/commits/:sha/builds' do
......
......@@ -112,5 +112,24 @@ describe MergeRequests::CreateFromIssueService do
expect(result[:merge_request].assignee).to eq(user)
end
context 'when ref branch is set' do
subject { described_class.new(project, user, issue_iid: issue.iid, ref: 'feature').execute }
it 'sets the merge request source branch to the new issue branch' do
expect(subject[:merge_request].source_branch).to eq(issue.to_branch_name)
end
it 'sets the merge request target branch to the ref branch' do
expect(subject[:merge_request].target_branch).to eq('feature')
end
context 'when ref branch does not exist' do
it 'does not create a merge request' do
expect { described_class.new(project, user, issue_iid: issue.iid, ref: 'nobr').execute }
.not_to change { project.merge_requests.count }
end
end
end
end
end
require 'spec_helper'
describe GitlabShellWorker do
let(:worker) { described_class.new }
describe '#perform with add_key' do
it 'calls add_key on Gitlab::Shell' do
expect_any_instance_of(Gitlab::Shell).to receive(:add_key).with('foo', 'bar')
worker.perform(:add_key, 'foo', 'bar')
end
end
end
......@@ -90,10 +90,14 @@ codequality:
performance:
stage: performance
image:
name: sitespeedio/sitespeed.io:6.0.3
entrypoint: [""]
image: docker:latest
variables:
DOCKER_DRIVER: overlay2
allow_failure: true
services:
- docker:dind
script:
- setup_docker
- performance
artifacts:
paths:
......@@ -112,7 +116,7 @@ sast:
- sast .
artifacts:
paths: [gl-sast-report.json]
sast:container:
image: docker:latest
variables:
......@@ -260,7 +264,7 @@ production:
export CI_APPLICATION_TAG=$CI_COMMIT_SHA
export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
export TILLER_NAMESPACE=$KUBE_NAMESPACE
function sast_container() {
docker run -d --name db arminc/clair-db:latest
docker run -p 6060:6060 --link db:postgres -d --name clair arminc/clair-local-scan:v2.0.1
......@@ -466,26 +470,26 @@ production:
--docker-email="$GITLAB_USER_EMAIL" \
-o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
}
function performance() {
export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
mkdir gitlab-exporter
wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/10-3/index.js
mkdir sitespeed-results
if [ -f .gitlab-urls.txt ]
then
sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
/start.sh --plugins.add gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.0.3 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
else
/start.sh --plugins.add gitlab-exporter --outputFolder sitespeed-results $CI_ENVIRONMENT_URL
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.0.3 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
fi
mv sitespeed-results/data/performance.json performance.json
}
function persist_environment_url() {
echo $CI_ENVIRONMENT_URL > environment_url.txt
}
......
alertmanager: |
alertmanager:
enabled: false
kubeStateMetrics: |
enabled: 'false'
kubeStateMetrics:
enabled: false
nodeExporter: |
enabled: 'false'
nodeExporter:
enabled: false
pushgateway: |
enabled: 'false'
pushgateway:
enabled: false
serverFiles: |
alerts: ''
rules: ''
serverFiles:
alerts: ""
rules: ""
prometheus.yml: |-
rule_files: |
rule_files:
- /etc/config/rules
- /etc/config/alerts
scrape_configs: |
scrape_configs:
- job_name: prometheus
static_configs: |
static_configs:
- targets:
- localhost:9090
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs: |
kubernetes_sd_configs:
- role: endpoints
scheme: https
tls_config:
......@@ -37,14 +40,17 @@ serverFiles: |
- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: default;kubernetes;https
- job_name: 'kubernetes-nodes'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
......@@ -54,14 +60,15 @@ serverFiles: |
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
- job_name: 'kubernetes-service-endpoints'
kubernetes_sd_configs:
- role: endpoints
relabel_configs: |
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
action: keep
regex: 'true'
regex: true
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
action: replace
target_label: __scheme__
......@@ -83,24 +90,30 @@ serverFiles: |
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: kubernetes_name
- job_name: 'prometheus-pushgateway'
honor_labels: true
kubernetes_sd_configs: |
kubernetes_sd_configs:
- role: service
relabel_configs: |
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: pushgateway
- job_name: 'kubernetes-services'
metrics_path: /probe
params: |
params:
module: [http_2xx]
kubernetes_sd_configs: |
kubernetes_sd_configs:
- role: service
relabel_configs: |
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: 'true'
regex: true
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
......@@ -113,17 +126,25 @@ serverFiles: |
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
- job_name: 'kubernetes-pods'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: 'true'
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: (.+):(?:\d+);(\d+)
replacement: ${1}:${2}
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
......