Getting 502 errors with GitLab - ruby-on-rails

The GitLab server is no longer functioning. When I browse to
http://myhostname:4332/log_in
the system never comes back and I get a connection timed out error.
I have tried to run the following:
sudo strace -tt -T -f -s 1024 -p 4332 -o /tmp/unicorn.txt
sudo strace -tt -T -f -s 1024 -p 8080 -o /tmp/unicorn.txt
as part of troubleshooting by following the steps here:
https://docs.gitlab.com/ce/administration/troubleshooting/debug.html
The result is always the same, something like:
dgmufasa@mycomputer:~$ sudo strace -tt -T -f -s 1024 -p 4332 -o /tmp/unicorn.txt
strace: attach: ptrace(PTRACE_ATTACH, ...): No such process
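Note: strace's -p flag expects a process ID, not a port number, which is why attaching to 4332 and 8080 fails with "No such process". A minimal sketch of attaching to the app server itself, assuming the Omnibus Unicorn master appears in the process list as "unicorn master":

UNICORN_PID=$(pgrep -f 'unicorn master' | head -n1)   # find the Unicorn master PID, not a port
sudo strace -tt -T -f -s 1024 -p "$UNICORN_PID" -o /tmp/unicorn.txt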
Below is more information. What can I do to solve the problem?
TIA
sudo gitlab-ctl show-config
{
"gitlab": {
"gitlab-shell": {
"secret_token": "secret_token",
"auth_file": "/var/opt/gitlab/.ssh/authorized_keys"
},
"gitlab-rails": {
"gitlab_default_theme": 4,
"backup_upload_connection": {
"provider": "AWS",
"region": "us-east-2",
"aws_access_key_id": "aws_access_key",
"aws_secret_access_key": "aws_secret_key"
},
"backup_upload_remote_directory": "myhost-gitlab-bckup-0001",
"backup_multipart_chunk_size": 104857600,
"backup_encryption": "AES256",
"smtp_enable": true,
"smtp_address": "smtp.gmail.com",
"smtp_port": 587,
"smtp_user_name": "myemail#gmail.com",
"smtp_password": "mypassword",
"smtp_domain": "smtp.gmail.com",
"smtp_authentication": "login",
"smtp_enable_starttls_auto": true,
"smtp_tls": false,
"smtp_openssl_verify_mode": "peer",
"secret_key_base": "secret_key_base",
"db_key_base": "dev_key_base",
"otp_key_base": "otp_key_base",
"openid_connect_signing_key": "signing_key"
"stuck_ci_jobs_worker_cron": null,
"gitlab_host": "mycomputer.com",
"gitlab_email_from": "gitlab#mycomputer.com",
"gitlab_https": false,
"gitlab_port": 4332,
"shared_path": "/var/opt/gitlab/gitlab-rails/shared",
"artifacts_path": "/var/opt/gitlab/gitlab-rails/shared/artifacts",
"lfs_storage_path": "/var/opt/gitlab/gitlab-rails/shared/lfs-objects",
"pages_path": "/var/opt/gitlab/gitlab-rails/shared/pages",
"repositories_storages": {
"default": {
"path": "/var/opt/gitlab/git-data/repositories",
"gitaly_address": "unix:/var/opt/gitlab/gitaly/gitaly.socket",
"failure_count_threshold": 10,
"failure_wait_time": 30,
"failure_reset_time": 1800,
"storage_timeout": 30
}
},
"trusted_proxies": [
],
"db_username": "gitlab",
"db_host": "/var/opt/gitlab/postgresql",
"db_port": 5432
},
"gitlab-workhorse": {
"secret_token": "secret_token",
"auth_socket": "/var/opt/gitlab/gitlab-rails/sockets/gitlab.socket"
},
"logging": {
},
"redis": {
},
"postgresql": {
},
"unicorn": {
"port": 4333
},
"mailroom": {
},
"gitlab-pages": {
},
"prometheus": {
"flags": {
"web.listen-address": "localhost:9090",
"storage.local.path": "/var/opt/gitlab/prometheus/data",
"storage.local.chunk-encoding-version": "2",
"storage.local.target-heap-size": "106218700",
"config.file": "/var/opt/gitlab/prometheus/prometheus.yml"
}
},
"external-url": "http://mycomputer.com:4332",
"registry-external-url": null,
"mattermost-external-url": null,
"pages-external-url": null,
"runtime-dir": "/run",
"bootstrap": {
},
"omnibus-gitconfig": {
},
"manage-accounts": {
},
"manage-storage-directories": {
},
"user": {
"home": "/var/opt/gitlab",
"git_user_email": "gitlab#mycomputer.com"
},
"gitlab-ci": {
},
"sidekiq": {
},
"mattermost-nginx": {
"listen_port": null
},
"pages-nginx": {
"listen_port": null
},
"registry-nginx": {
},
"remote-syslog": {
},
"logrotate": {
},
"high-availability": {
},
"web-server": {
},
"gitaly": {
"storage": [
{
"name": "default",
"path": "/var/opt/gitlab/git-data/repositories"
}
]
},
"node-exporter": {
"flags": {
"web.listen-address": "localhost:9100",
"collector.textfile.directory": "/var/opt/gitlab/node-exporter/textfile_collector"
}
},
"redis-exporter": {
"flags": {
"web.listen-address": "localhost:9121",
"redis.addr": "unix:///var/opt/gitlab/redis/redis.socket"
}
},
"postgres-exporter": {
"flags": {
"web.listen-address": "localhost:9187",
"extend.query-path": "/var/opt/gitlab/postgres-exporter/queries.yaml"
}
},
"gitlab-monitor": {
},
"prometheus-monitoring": {
},
"pgbouncer": {
},
"sentinel": {
},
"mattermost": {
"email_invite_salt": "invite_salt",
"file_public_link_salt": "file_salt",
"sql_at_rest_encrypt_key": "sql_rest_key",
"sql_data_source": "user=gitlab_mattermost host=/var/opt/gitlab/postgresql port=5432 dbname=mattermost_production",
"sql_data_source_replicas": [
"user=gitlab_mattermost host=/var/opt/gitlab/postgresql port=5432 dbname=mattermost_production"
]
},
"nginx": {
"custom_gitlab_server_config": "location ^~ /.well-known { root /var/www/letsencrypt; }",
"proxy_set_headers": {
"Host": "$http_host_with_default",
"X-Real-IP": "$remote_addr",
"X-Forwarded-For": "$proxy_add_x_forwarded_for",
"Upgrade": "$http_upgrade",
"Connection": "$connection_upgrade",
"X-Forwarded-Proto": "http"
},
"real_ip_trusted_addresses": [
],
"listen_port": 4332
}
},
"roles": {
"application": {
},
"redis-sentinel": {
},
"redis-master": {
},
"redis-slave": {
},
"geo-primary": {
},
"geo-secondary": {
}
},
"registry": {
"http_secret": "http_secret",
"internal_certificate":"internal_certificate",
"internal_key": "internal_key"
},
"repmgr": {
},
"repmgrd": {
},
"consul": {
}
}
Restarting the GitLab services:
dgmufasa@mycomputer:~$ sudo gitlab-ctl restart
ok: run: gitaly: (pid 5286) 0s
ok: run: gitlab-monitor: (pid 5296) 1s
ok: run: gitlab-workhorse: (pid 5300) 0s
ok: run: logrotate: (pid 5315) 0s
ok: run: nginx: (pid 5329) 1s
ok: run: node-exporter: (pid 5336) 0s
ok: run: postgres-exporter: (pid 5341) 0s
ok: run: postgresql: (pid 5350) 0s
ok: run: prometheus: (pid 5394) 0s
ok: run: redis: (pid 5404) 1s
ok: run: redis-exporter: (pid 5409) 0s
ok: run: sidekiq: (pid 5418) 1s
ok: run: unicorn: (pid 5427) 0s
I executed: dgmufasa@mycomputer:~$ sudo tail -10 /var/log/gitlab/nginx/gitlab_access.log
192.222.22.22 - - [24/Jan/2018:21:55:24 -0600] "GET /users/sign_in HTTP/1.1" 302 83 "" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
192.222.22.22 - - [24/Jan/2018:21:55:28 -0600] "GET / HTTP/1.1" 200 7845 "" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
192.222.22.22 - - [24/Jan/2018:21:58:50 -0600] "GET /CCLUB-WORK/ccindb-restapi-fieldworkserver HTTP/1.1" 502 2916 "http://mycomputer:4332/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
192.222.22.22 - - [24/Jan/2018:21:58:57 -0600] "GET /CCLUB-WORK/ccindb-restapi-fieldworkserver HTTP/1.1" 502 2916 "http://mycomputer:4332/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
192.222.22.22 - - [24/Jan/2018:21:59:00 -0600] "GET /CCLUB-WORK/ccindb-restapi-fieldworkserver HTTP/1.1" 502 2916 "http://mycomputer:4332/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
192.222.22.22 - - [24/Jan/2018:22:00:51 -0600] "GET /CCLUB-WORK/ccindb-restapi-fieldworkserver HTTP/1.1" 499 0 "http://mycomputer:4332/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
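The 502 responses here mean nginx accepted the request but got no reply from the upstream Rails app (Unicorn, via gitlab-workhorse). Under the default Omnibus log layout, the matching upstream errors should be visible with something like:

sudo tail -20 /var/log/gitlab/nginx/gitlab_error.log      # nginx upstream/connect errors
sudo tail -20 /var/log/gitlab/unicorn/unicorn_stderr.log  # Unicorn worker crashes/timeouts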

Well, I ran sudo gitlab-ctl reconfigure and everything started working again. I don't know why, so it would be good to get some kind of hint about what this command does.
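For background, gitlab-ctl reconfigure re-runs the Chef recipes bundled with the Omnibus package, regenerating each service's configuration under /var/opt/gitlab from /etc/gitlab/gitlab.rb and restarting any service whose files changed, which is consistent with it clearing a stale nginx/Unicorn port or socket mismatch like the one behind these 502s. A quick health check afterwards might look like this (the port number comes from the config above):

sudo gitlab-ctl reconfigure                    # regenerate configs, restart changed services
sudo gitlab-ctl status                         # every service should report "run:"
curl -I http://localhost:4332/users/sign_in    # expect 200/302 rather than 502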

Related

Cypress: Is it possible to continue a test after a failure, but mark it as failed?

Use case:
I want to automatically test all sitemap URLs (more than 1000) of our website after each new code release, to see whether the update broke any of them.
My problem:
The test fails as soon as a URL returns any status other than 200 (even with failOnStatus: false). Instead, I need to collect the results, check them only at the end, and fail the test there, in order to find all the broken links.
My code:
describe('Validate sitemaps files', () => {
  let urls = [];
  let failed = []; // collects broken links
  it("Should successfully load each url in the sitemap", () => {
    cy.fixture('sitemaps.json').then((data) => {
      for (var index in data) {
        cy.log(data[index].url)
        cy.request({
          //url: Cypress.config().baseUrl + data[index].url, failOnStatusCode: false,
          url: data[index].url, failOnStatus: false,
          headers: {
            "Content-Type": "text/xml; charset=utf-8",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36",
          },
        }).as("sitemap").then((response) => {
          urls = Cypress.$(response.body)
            .find("loc")
            .toArray()
            .map((el) => el.innerText)
        })
      }
      urls.forEach((url) => {
        // check if the resource exists
        cy.request(url).its('status').then(status => {
          if (status !== 200) {
            failed.push(url)
          }
        }).then(() => {
          // check inside then to ensure loop has finished
          cy.log('Failed links: ' + `${failed.join(', ')}`)
          expect(failed.length).to.eq(0)
        })
        cy.wrap('passed').as('ctrl')
      })
    })
  })
})
Fixture (just an example to test my code):
[
  { "url": "https://gooogle.com" },
  { "url": "https://browserstack.com/notfound" },
  { "url": "https://yahoo.com" },
  { "url": "https://browserstack.com" }
]
Test result: (screenshot not reproduced here)
I have already seen a related answer, but no success so far.
Any help is much appreciated.
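One way to get this behavior, sketched under the assumption that the URLs to check are already in the fixture (note the real Cypress option is failOnStatusCode, not failOnStatus): queue one cy.request per URL with failOnStatusCode: false, collect failures in a plain array, and assert once in a final .then() that runs only after the whole command queue has drained.

describe('Validate sitemap urls', () => {
  it('loads every url and reports all failures at the end', () => {
    const failed = [];
    cy.fixture('sitemaps.json')
      .then((data) => {
        data.forEach(({ url }) => {
          // failOnStatusCode: false lets non-2xx responses through without failing the test
          cy.request({ url, failOnStatusCode: false }).then((response) => {
            if (response.status !== 200) {
              failed.push(`${url} (${response.status})`);
            }
          });
        });
      })
      .then(() => {
        // runs after every queued request above has resolved
        cy.log('Failed links: ' + failed.join(', '));
        expect(failed, failed.join(', ')).to.be.empty;
      });
  });
});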

Logstash nginx parser for http_x_forwarded_for

I am sending nginx logs to Elasticsearch using Filebeat and Logstash. My logs have the following form:
000.000.000.000 - - [17/Oct/2022:08:25:18 +0000] "OPTIONS /favicon.svg HTTP/1.1" 405 559 "https://example.net/auth/login" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36" "111.111.111.111, 222.222.222.222"
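For context, a line like this typically comes from an nginx log_format that extends the combined format with the X-Forwarded-For header, along these lines (an assumption about this nginx setup, not taken from the question):

log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                '$status $body_bytes_sent "$http_referer" '
                '"$http_user_agent" "$http_x_forwarded_for"';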
I have the following configuration file for logstash:
input {
  beats {
    port => 5035
  }
}
filter {
  grok {
    match => [ "message" , "%{COMBINEDAPACHELOG}+%{GREEDYDATA:http_x_forwarded_for}" ]
  }
  mutate {
    convert => ["response", "integer"]
    convert => ["bytes", "integer"]
    convert => ["responsetime", "float"]
  }
  geoip {
    source => "clientip"
    target => "geoip"
    add_tag => [ "nginx-geoip" ]
  }
  date {
    match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
  }
  useragent {
    source => "message"
  }
}
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    index => "weblogs-%{+YYYY.MM.dd}"
    document_type => "nginx_logs"
    user => "elastic"
    password => "changeme"
  }
  stdout { codec => rubydebug }
}
This pipeline saves the logs to Elasticsearch in the following form:
"response" : 405,
"timestamp" : "17/Oct/2022:08:25:18 +0000",
"os_version" : "10",
"auth" : "-",
"verb" : "OPTIONS",
"clientip" : "000.000.000.000",
"httpversion" : "1.1",
"referrer" : "\"https://example.net/auth/login\"",
"geoip" : { },
"os" : "Windows",
"os_name" : "Windows",
"agent" : {
"version" : "7.17.6",
"hostname" : "0242869f2486",
"type" : "filebeat",
"id" : "4de3a108-35bf-4bd9-8b18-a5d8f9f2bc83",
"ephemeral_id" : "3a5f78b5-bae0-41f6-8d63-eea700df6c3c",
"name" : "0242869f2486"
},
"log" : {
"file" : {
"path" : "/var/log/nginx/access.log"
},
"offset" : 1869518
},
"bytes" : 559,
"ident" : "-",
"http_x_forwarded_for" : " \"111.111.111.111, 222.222.222.222\"",
"os_full" : "Windows 10",
"#timestamp" : "2022-10-17T08:25:18.000Z",
"request" : "/favicon.svg",
"device" : "Spider",
"name" : "favicon",
"input" : {
"type" : "log"
},
"host" : {
"name" : "0242869f2486"
},
"os_major" : "10",
"#version" : "1",
"message" : "000.000.000.000 - - [17/Oct/2022:08:25:18 +0000] \"OPTIONS /favicon.svg HTTP/1.1\" 405 559 \"https://example.net/auth/login\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36\" \"111.111.111.111, 222.222.222.222\"",
"tags" : [
"beats_input_codec_plain_applied",
"_geoip_lookup_failure"
]
However, my goal is to parse the first IP from the http_x_forwarded_for field, add a new field called real_client_ip, and save it to the index. Is there a way to achieve that?
You can add one more grok filter to your Logstash pipeline, after the first grok filter:
filter {
  grok {
    match => [ "message" , "%{COMBINEDAPACHELOG}+%{GREEDYDATA:http_x_forwarded_for}" ]
  }
  grok {
    match => [ "http_x_forwarded_for" , "%{IP:real_client_ip}" ]
  }
  mutate {
    convert => ["response", "integer"]
    convert => ["bytes", "integer"]
    convert => ["responsetime", "float"]
  }
  geoip {
    source => "clientip"
    target => "geoip"
    add_tag => [ "nginx-geoip" ]
  }
  date {
    match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
  }
  useragent {
    source => "message"
  }
}
PS: I have validated the grok pattern in Kibana, but not by running the Logstash pipeline; it should work for your use case. Since %{IP:real_client_ip} matches the first IP-shaped token in the string, it picks up the first address in the forwarded list and ignores the surrounding quotes and the second address.
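If you also want the geoip lookup to run against the forwarded client address rather than the proxy address (an assumption about the intent, since clientip here is the proxy), you could point the geoip filter at the new field:

geoip {
  source => "real_client_ip"
  target => "geoip"
  add_tag => [ "nginx-geoip" ]
}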

Logstash cannot parse user_agent field of nginx

I have nginx logs with the following format:
192.168.0.1 - - [18/Jul/2022:11:20:28 +0000] "GET / HTTP/1.1" 200 15 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36" "-"
192.168.128.1 - - [18/Jul/2022:13:22:15 +0000] "GET / HTTP/1.1" 200 615 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36" "-"
I am using the following pipeline to parse them and store them into Elasticsearch:
input {
  beats {
    port => 5044
  }
}
filter {
  grok {
    match => [ "message" , "%{COMBINEDAPACHELOG}+%{GREEDYDATA:extra_fields}" ]
  }
  mutate {
    convert => ["response", "integer"]
    convert => ["bytes", "integer"]
    convert => ["responsetime", "float"]
  }
  geoip {
    source => "clientip"
    target => "geoip"
    add_tag => [ "nginx-geoip" ]
  }
  date {
    match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
  }
  useragent {
    source => "agent"
  }
}
output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    index => "weblogs-%{+YYYY.MM.dd}"
    document_type => "nginx_logs"
    user => "elastic"
    password => "changeme"
  }
  stdout { codec => rubydebug }
}
However, the useragent part does not seem to work, since I cannot see its fields in the output:
{
"httpversion" => "1.1",
"clientip" => "192.168.0.1",
"ident" => "-",
"timestamp" => "18/Jul/2022:11:20:28 +0000",
"verb" => "GET",
"#timestamp" => 2022-07-18T11:20:28.000Z,
"#version" => "1",
"tags" => [
[0] "beats_input_codec_plain_applied",
[1] "_geoip_lookup_failure"
],
"host" => {
"name" => "9a852bd136fd"
},
"auth" => "-",
"bytes" => 15,
"referrer" => "\"-\"",
"geoip" => {},
"message" => "192.168.0.1 - - [18/Jul/2022:11:20:28 +0000] \"GET / HTTP/1.1\" 200 15 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36\" \"-\"",
"response" => 200,
"agent" => {
"version" => "7.3.2",
"ephemeral_id" => "0c38336d-1e30-4aaa-9ba8-20bd7bd8fb48",
"type" => "filebeat",
"hostname" => "9a852bd136fd",
"id" => "8991142a-95df-4aed-a190-bda4649c04cd"
},
"input" => {
"type" => "log"
},
"request" => "/",
"extra_fields" => " \"-\"",
"log" => {
"file" => {
"path" => "/var/log/nginx/access.log"
},
"offset" => 11021
},
"ecs" => {
"version" => "1.0.1"
}
}
What I need is a field containing the whole http_user_agent content. Any idea what is causing the error?
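A likely cause, judging from the output above (an assumption, not something the logs prove): the Beats input already ships an ECS-style agent object with Filebeat metadata, and it collides with the agent field that COMBINEDAPACHELOG uses for the user-agent string, so useragent { source => "agent" } never receives the UA text. One sketch of a workaround is to capture the quoted user agent into a dedicated field and parse that instead:

filter {
  grok {
    match => [ "message" , "%{COMBINEDAPACHELOG}+%{GREEDYDATA:extra_fields}" ]
  }
  # the UA is the second-to-last double-quoted block on the line
  grok {
    match => [ "message" , "\"(?<http_user_agent>[^\"]*)\" \"[^\"]*\"$" ]
  }
  useragent {
    source => "http_user_agent"
    target => "ua"   # nest parsed fields under "ua" to avoid clobbering other fields
  }
}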

Sberbank balance parsing

I was trying to parse my balance from sberbank.ru.
So here is what I've tried:
import requests

headers = {
    'Cookie': 'cookie',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.132 YaBrowser/22.3.1.895 Yowser/2.5 Safari/537.36',
}
params = {
    'forceUpdate': 'false',
    'withData': 'true'
}
resp_json = requests.post('https://web-node6.online.sberbank.ru/main-screen/rest/v1/web/section/meta', headers=headers, params=params).json()
print(resp_json)
But I got this error ("Ошибка валидации запроса" translates to "Request validation error"):
{'status': {'code': 3, 'errors': [{'id': 'EFSGW-41', 'element': '', 'title': 'datapower', 'description': 'Ошибка валидации запроса'}], 'warnings': [{'id': '', 'element': '', 'title': '', 'description': ''}]}}

springdoc / Swagger UI not passing OAuth2 token to API

I am using springdoc (v1.5.9) to generate Swagger definitions for an API. After authenticating inside the Swagger UI and executing a secured method, the HTTP request received by the Spring Boot app has no Authorization header. I have confirmed via the JS debugger that the Swagger UI received and stored a valid authentication token.
Below are the HTTP request, the Swagger api-docs showing the security scheme defined / applied to the method, the springdoc configuration, and a controller.
What do I need to change or add in my Spring configuration to get the Authorization header passed to the API from the Swagger UI?
HTTP request received
Request received for GET '/foo/1234':
org.apache.catalina.connector.RequestFacade@71c70030
servletPath:/foo/1234
pathInfo:null
headers:
host: localhost:8070
connection: keep-alive
sec-ch-ua: "Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"
accept: application/json
sec-ch-ua-mobile: ?0
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36
sec-fetch-site: same-origin
sec-fetch-mode: cors
sec-fetch-dest: empty
referer: http://localhost:8070/swagger-ui/index.html?configUrl=/v3/api-docs/swagger-config
accept-encoding: gzip, deflate, br
accept-language: en-US,en;q=0.9
cookie: JSESSIONID=A0F9846102153C06D77D6ED5506CC227
Security filter chain: [
WebAsyncManagerIntegrationFilter
SecurityContextPersistenceFilter
HeaderWriterFilter
CsrfFilter
LogoutFilter
BearerTokenAuthenticationFilter
RequestCacheAwareFilter
SecurityContextHolderAwareRequestFilter
AnonymousAuthenticationFilter
SessionManagementFilter
ExceptionTranslationFilter
FilterSecurityInterceptor
]
api-docs
{
  "openapi": "3.0.1",
  "info": {
    "title": "My App",
    "version": "v1"
  },
  "servers": [
    {
      "url": "http://localhost:8070",
      "description": "Generated server url"
    }
  ],
  "paths": {
    "/foo/{id}": {
      "get": {
        "tags": [
          "foo"
        ],
        "parameters": [
          {
            "name": "id",
            "in": "path",
            "required": true,
            "schema": {
              "type": "string"
            }
          }
        ],
        "responses": {
        },
        "security": [
          {
            "custom": [
            ]
          }
        ]
      }
    }
  },
  "components": {
    "securitySchemes": {
      "access_token": {
        "type": "oauth2",
        "in": "header",
        "scheme": "custom",
        "flows": {
          "authorizationCode": {
            "authorizationUrl": "https://login.myauthsever.com/v2/oauth/authorize",
            "tokenUrl": "https://login.myauthsever.com/v2/oauth/token",
            "scopes": {
            }
          }
        }
      }
    }
  }
}
OpenAPI config
@OpenAPIDefinition(info = @Info(title = "My App", version = "v1"))
@SecurityScheme(scheme = "custom", type = SecuritySchemeType.OAUTH2, in = SecuritySchemeIn.HEADER, name = "access_token",
    flows = @OAuthFlows(authorizationCode = @OAuthFlow(
        authorizationUrl = "https://login.myauthsever.com/v2/oauth/authorize",
        tokenUrl = "https://login.myauthsever.com/v2/oauth/token", scopes = {})))
public class OpenApiConfig {
}
Controller
@RestController
@Tag(name = "foo")
@SecurityRequirement(name = "custom")
public class SystemSigController {

    @GetMapping(path = "/foo/{id}")
    String getFoo(@PathVariable String id) {
        ...
    }
}
The @SecurityRequirement.name value must be the same as @SecurityScheme.name.
Since you have @SecurityScheme(..., name = "access_token", ...), the controller must use:
@SecurityRequirement(name = "access_token")
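Applied to the controller from the question, the corrected annotation would look like this (a sketch with the imports spelled out; the method body is a placeholder):

import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;

@RestController
@Tag(name = "foo")
@SecurityRequirement(name = "access_token") // must match @SecurityScheme(name = "access_token")
public class SystemSigController {

    @GetMapping(path = "/foo/{id}")
    String getFoo(@PathVariable String id) {
        return "foo-" + id; // placeholder body
    }
}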
