I am trying to add a CORS header to my app. When deploying it to the cloud via Docker I get the error:
nginx: [emerg] "server" directive is not allowed here in /etc/nginx/conf.d/default.conf:1
My nginx file
# NOTE(review): this file lives in /etc/nginx/conf.d/ and is included from
# INSIDE the http{} block of /etc/nginx/nginx.conf, so opening another http{}
# here is exactly what produces the startup emerg:
#   '"server" directive is not allowed here in .../conf.d/default.conf:1'
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server {
# NOTE(review): 'root' expects a filesystem path (e.g. /var/www/html),
# not a URL -- confirm the real document root on disk.
root https://srm-master.nonprod.com;
index index.html index.htm;
# $cors is flagged per request when the Origin header matches *.ini.com.
set $cors "";
# NOTE(review): the dots are unescaped, so '.ini.com' also matches e.g.
# 'fooXiniYcom'; the pattern is also unanchored at the end.
if ($http_origin ~* (.*\.ini.com)) {
set $cors "true";
}
server_name .ini.com;
location / {
# Emit CORS response headers only for whitelisted origins.
if ($cors = "true") {
add_header 'Access-Control-Allow-Origin' "$http_origin";
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS,
DELETE, PUT';
add_header 'Access-Control-Allow-Credentials' 'true';
add_header 'Access-Control-Allow-Headers' 'User-Agent,Keep-
Alive,Content-Type';
}
}
}
#gzip on;
# NOTE(review): a file in conf.d/ including conf.d/*.conf includes itself
# recursively -- this line belongs in the top-level nginx.conf only.
include /etc/nginx/conf.d/*.conf;
}
Actually, the problem has nothing to do with Docker; the cause of the error is the nginx config. Nginx allows only one http section, and it is already defined in /etc/nginx/nginx.conf. Remove the http section from your config, and it should work:
# Standalone conf.d server block (no http{} wrapper -- that context is
# provided by /etc/nginx/nginx.conf, which includes this file).
server {
    # NOTE(review): 'root' must be a local filesystem path, not a URL --
    # TODO confirm the actual document root on disk.
    root https://srm-master.nonprod.com;
    index index.html index.htm;

    # Flag requests whose Origin header is a subdomain of ini.com.
    # Dots are escaped and the pattern is anchored so that origins such as
    # "fooXiniYcom" or "a.ini.com.evil.net" cannot slip through the match.
    set $cors "";
    if ($http_origin ~* (.*\.ini\.com$)) {
        set $cors "true";
    }

    server_name .ini.com;

    location / {
        # Emit CORS response headers only for whitelisted origins.
        if ($cors = "true") {
            add_header 'Access-Control-Allow-Origin' "$http_origin";
            add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PUT';
            add_header 'Access-Control-Allow-Credentials' 'true';
            add_header 'Access-Control-Allow-Headers' 'User-Agent,Keep-Alive,Content-Type';
        }
    }
}
I have index.php and manager.php for those 2 applications in the root, set up through Docker with nginx, php-fpm and some other dependencies.
This is my docker-compose file, I put only the important parts.
# docker-compose (excerpt). Indentation restored: compose files are YAML, so
# the nesting below is structurally significant and the flattened paste was
# not parseable as written.
services:
  web:
    container_name: web
    build:
      context: ./
      dockerfile: docker/nginx/Dockerfile
    volumes:
      - ./:/var/www
    ports:
      - 80              # container port only; the host port is assigned randomly
    depends_on:
      - app
    environment:
      VIRTUAL_HOST: ${VIRTUAL_HOSTS}
      VIRTUAL_PORT: 80
    networks:
      - nginx-proxy
      - my-app
  app:
    container_name: app
    build:
      context: ./
      dockerfile: docker/php/Dockerfile
    volumes:
      - ./:/var/www
    depends_on:
      - mysql
    ports:
      - 9000            # php-fpm; reachable from the web container as app:9000
    networks:
      - my-app
# ...
And this is my vhost file. I tried everything I knew or found on the internet to make it work, without success; this is its final form, and of course it is still not working.
# Vhost for the dockerised PHP app: nginx serves /var/www and hands PHP to
# the php-fpm container ("app") over FastCGI.
server {
listen 80;
server_name myapplication.local;
index index.php index.html;
root /var/www;
location / {
try_files $uri $uri/ =404;
}
# NOTE(review): regex locations (the '~ \.php$' block below) take precedence
# over plain prefix locations, so a request for /manager.php is most likely
# handled by the generic PHP block, never by this one -- verify with a
# request trace.
location /manager.php {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass app:9000;
fastcgi_index manager.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
}
# Generic PHP handler: split PATH_INFO off the script name and forward to
# php-fpm in the "app" container.
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass app:9000;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
}
# NOTE(review): try_files + if/rewrite at server level alongside per-location
# try_files makes the request flow hard to predict -- presumably the cause of
# the /manager.php/* 404s; confirm which rule actually fires.
try_files $uri $uri/ /index.php;
if (!-e $request_filename){
rewrite ^/(.*)$ /index.php?/$1? last;
}
location ~ /\.ht {
deny all;
}
location = /favicon.ico {
log_not_found off; access_log off;
}
location = /robots.txt {
log_not_found off; access_log off; allow all;
}
# Long-lived caching for static assets.
location ~* \.(css|gif|ico|jpeg|jpg|js|png)$ {
expires max;
log_not_found off;
}
sendfile off;
}
If I access the application normally it works just fine, but if I go to /manager.php or /manager.php/* I get a 404. Does anyone know how I can configure nginx to work in this situation? Thanks!
I found the solution:
# Accepted solution: drop the dedicated /manager.php location, let the single
# regex PHP block handle both front controllers, and add manager.php-aware
# rewrite fallbacks. The if/rewrite ordering below is significant: the more
# specific /manager.php/... rewrite must run before the catch-all.
server {
listen 80;
server_name myapplication.local;
index index.php index.html manager.php;
root /var/www;
location / {
try_files $uri $uri/ =404;
}
# Single PHP handler for both index.php and manager.php.
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass app:9000;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
}
try_files $uri $uri/ /index.php /manager.php;
if (!-e $request_filename){
# Route /manager.php/* to the manager front controller first,
# everything else to index.php.
rewrite ^/manager.php/(.*)$ /manager.php?/$1? last;
rewrite ^/(.*)$ /index.php?/$1? last;
}
location ~ /\.ht {
deny all;
}
location = /favicon.ico {
log_not_found off; access_log off;
}
location = /robots.txt {
log_not_found off; access_log off; allow all;
}
# Long-lived caching for static assets.
location ~* \.(css|gif|ico|jpeg|jpg|js|png)$ {
expires max;
log_not_found off;
}
sendfile off;
}
I am trying to set up SSL with Let's Encrypt using this article: https://medium.com/@pentacent/nginx-and-lets-encrypt-with-docker-in-less-than-5-minutes-b4b8a60d3a71
my nginx config
# Combined HTTP/HTTPS server for kcr.ttfr.ru (Laravel-style public/ root with
# a /frontend SPA build and a socket.io echo service).
server {
listen 80;
server_name kcr.ttfr.ru;
server_name www.kcr.ttfr.ru;
root /var/www/k4fntr/public;
index /frontend/index.html;
client_max_body_size 128M;
gzip on; # enable gzip
gzip_disable "msie6";
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript;
access_log /var/log/nginx/access.log main;
error_log /var/log/nginx/error.log debug;
location / {
try_files /frontend/$uri $uri $uri/ /index.php?$args; # permalinks
client_max_body_size 128M;
}
# NOTE(review): this regex matches EVERY path containing "/." -- including
# /.well-known/acme-challenge/... -- and regex locations take precedence
# over the plain prefix location at the bottom of this block, so the ACME
# challenge requests hit this deny and return 403.
location ~ /\. {
deny all; # deny hidden files
}
location ~* /(?:uploads|files)/.*\.php$ {
deny all; # deny scripts
}
location ~* ^.+\.(ogg|ogv|svg|svgz|eot|otf|woff|mp4|ttf|rss|atom|jpg|jpeg|gif|png|ico|zip|tgz|gz|rar|bz2|doc|xls|exe|ppt|tar|mid|midi|wav|bmp|rtf)$ {
access_log off;
log_not_found off;
expires max; # cache static files
try_files /frontend/$uri $uri $uri/ /index.php?$args; # permalinks
}
location ~ \.php$ {
proxy_set_header X-Real-IP $remote_addr;
fastcgi_pass k4fntr_php-fpm:9000;
fastcgi_index index.php;
include /etc/nginx/fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_read_timeout 300;
}
# WebSocket upgrade pass-through to the echo server.
location /socket.io {
proxy_pass http://k4fntr_echo:6001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
}
location ~ /\.ht {
deny all;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/kcr.ttfr.ru/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/kcr.ttfr.ru/privkey.pem;
include /etc/letsencrypt/options-ssl-nginx.conf;
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
# Prefix location -- loses to the 'location ~ /\.' regex above (see note).
location /.well-known/acme-challenge/ { root /var/www/certbot; }
}
but my challenges failed because the URL /.well-known/acme-challenge/ returns 403 Forbidden.
What's wrong with my nginx configuration?
change your location to something like this:
# Serve ACME HTTP-01 challenge files straight off disk, bypassing the app.
location /.well-known/acme-challenge {
root /var/www/certbot;
# Challenge tokens have no extension; serve them as plain text.
default_type text/plain;
}
Another question: do you want to redirect all non-HTTPS traffic to HTTPS?
In that case I would create one server block listening on port 80 and another one listening on 443.
# Port-80 server: everything except ACME challenges is redirected to HTTPS.
server {
    listen 80;
    server_name domain.io;

    location / {
        return 301 https://$server_name$request_uri;
    }

    # Certbot webroot -- must stay reachable over plain HTTP for renewals.
    location /.well-known/acme-challenge {
        # Fixed: the directive name was duplicated ("root root ..."),
        # which is a fatal nginx syntax error.
        root /var/www/certbot;
        default_type text/plain;
    }
}

# Port-443 server: terminates TLS and pins HTTPS in browsers for a year.
server {
    listen 443 ssl;
    server_name domain.io;
    add_header Strict-Transport-Security "max-age=31536000" always;
    ...
}
I've been struggling with this awkward issue for well over a week now and couldn't resolve it. Any help will be highly appreciated!
I'm building a web application that uses Nginx as the proxy, React for web's front-end, GoLang for my backend API. The entire application is running on Docker (version 19.03.5).
After I ran npm install to install some new packages and went to https://127.0.0.1:8000/ to start building the app, a blank white screen appeared and new error inside my Chrome dev tools:
sockjs.js:689 Uncaught Error: SecurityError: An insecure SockJS connection may not be initiated from a page loaded over HTTPS
at new SockJS (sockjs.js:689)
at new SockJSClient (webpack:///(:8000/webpack)-dev-server/client/clients/SockJSClient.js?:39:18)
at initSocket (webpack:///(:8000/webpack)-dev-server/client/socket.js?:20:12)
at Object.eval (webpack:///(:8000/webpack)-dev-server/client?:176:1)
at eval (webpack:///(:8000/webpack)-dev-server/client?:177:30)
at Object../node_modules/webpack-dev-server/client/index.js?http://0.0.0.0:8081 (bundle.cf100e5b1875c7903444.js:9267)
at __webpack_require__ (bundle.cf100e5b1875c7903444.js:727)
at fn (bundle.cf100e5b1875c7903444.js:101)
at eval (webpack:///multi_(:8000/webpack)-dev-server/client?:1:1)
at Object.0 (bundle.cf100e5b1875c7903444.js:10880)
At this point, I was told to add https: true to my webpack.config.js as follows:
// webpack-dev-server settings as suggested (fragment of webpack.config.js).
devServer: {
contentBase: buildPath,
inline: false,
watchContentBase: true,
compress: true,
historyApiFallback: true, // any routes will fetch bundle.js file
disableHostCheck: true, // for nginx proxy
port: 8081,
// NOTE(review): https:true makes the dev server itself speak TLS (with a
// self-signed cert); if nginx already terminates TLS, its proxy_pass must
// then use https:// upstream or requests will fail -- confirm which end
// owns TLS, not both.
https: true,
},
Then I rebuilt my Docker with the following command docker-compose -f development.docker-compose.yml up --build
and tried to access https://127.0.0.1:8000/ whether it solved my issue. Unfortunately, after the successful Docker-compose built, I'm getting 404 Not Found from Nginx.
After little digging inside Nginx container, I found the following log:
nginxbetteralpha | 2019/12/30 20:40:02 [emerg] 1#1: host not found in upstream "goapi:3000" in /etc/nginx/conf.d/dev_better_alpha.conf:2
nginxbetteralpha | nginx: [emerg] host not found in upstream "goapi:3000" in /etc/nginx/conf.d/dev_better_alpha.conf:2
My dev partner tried the following solutions found here Docker Networking - nginx: [emerg] host not found in upstream but none of them solved the problem.
Below you can look into my relevant config files:
webpack.config.js
// webpack.config.js -- React dev build: babel for JS/JSX, two CSS pipelines
// (plain CSS from deps, CSS-modules for app code), asset loaders, and the
// dev server that nginx proxies to on port 8081.
module.exports = () => {
return {
context: contextPath,
entry: {
// NOTE(review): "#babel/polyfill" is presumably "@babel/polyfill" with the
// "@" mangled by the paste ("@" -> "#") -- confirm against the real file.
main: ["#babel/polyfill", "webpack/hot/dev-server", indexJsPath],
},
output: {
// TODO: add this module for css bundle
// https://webpack.js.org/plugins/mini-css-extract-plugin/
// https://medium.com/#tomaskoutsky/hey-webpack-can-you-bust-my-cache-21350f951220
// filename: "[name].[hash].js",
filename: "bundle.[hash].js",
publicPath: "/", // very important otherwise index.html has src="bundle.js" instead of src="/bundle.js" => nginx wont be able to find it in sub paths
path: buildPath,
},
// NOTE(review): no `https` key here, unlike the snippet quoted earlier in
// the question -- verify which version of the config is actually built.
devServer: {
contentBase: buildPath,
inline: true,
watchContentBase: true,
compress: true,
historyApiFallback: true, // any routes will fetch bundle.js file
disableHostCheck: true, // for nginx proxy
port: 8081,
},
module: {
rules: [
{
test: /\.jsx?$/,
exclude: /node_modules/,
loader: "babel-loader",
},
// Plain CSS (everything outside /src, i.e. third-party styles).
{
test: /\.css$/,
exclude: [/src/],
use: [
require.resolve("style-loader"),
{
loader: require.resolve("css-loader"),
options: {
importLoaders: 1,
},
},
],
},
// App CSS with CSS modules + postcss.
{
test: /\.css$/,
exclude: [/node_modules/],
use: [
{ loader: "style-loader" },
{
loader: "css-loader",
options: {
modules: true,
url: true,
// NOTE(review): css-loader v4+ moved localIdentName under
// options.modules -- confirm the installed css-loader version.
localIdentName: "[local]___[hash:base64:5]", // it has to be same as `generateScopedName` in .babelrc react-css-module config setting !!
},
},
{ loader: "postcss-loader" },
],
},
// Images: inline tiny files, emit the rest under images/.
{
test: /\.(png|jpg|gif|jpeg|svg)$/,
use: [
{
loader: "url-loader",
options: {
limit: 1000,
outputPath: "images",
name: "[name]-[hash:6].[ext]",
},
},
{
loader: "image-webpack-loader",
options: {
disable: true, // in dev..
},
},
],
},
{
test: /\.(woff|woff2|eot|ttf)$/,
loader: "url-loader",
},
],
},
plugins: [HTMLWebpackPluginConfig, dotEnvPlugin],
resolve: {
extensions: [".js", ".jsx", ".json", ".css"],
},
};
};
dev/dev.conf
# dev/dev.conf -- plain-HTTP dev proxy: / -> webpack dev server, /api -> Go API.
upstream goapi {
# NOTE(review): "goapi:3000" must be resolvable by Docker's embedded DNS when
# nginx starts; the reported emerg ("host not found in upstream") means the
# goapi container was not up, or not on a network shared with nginx, at
# startup time.
server goapi:3000;
}
server {
listen 80;
server_name localhost;
location / {
proxy_pass http://webpackdevserver:8081;
}
location /api {
# proxy to golang API
proxy_pass http://goapi;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
# root /usr/share/nginx/html;
}
}
dev/conf.d/dev.conf
# conf.d/dev.conf (HTTP half): bounce every plain-HTTP request to HTTPS.
# NOTE(review): "upstream goapi" is also declared in dev/dev.conf; if both
# files end up under /etc/nginx/conf.d/ the duplicate upstream is itself a
# fatal config error -- make sure only one of them is loaded.
upstream goapi {
    server goapi:3000;
}

server {
    # http
    listen 80;
    # server_name _;
    server_name localhost;

    location / {
        # Redirect to the same URL on HTTPS. Fixed: the port belongs in the
        # authority component immediately after the host, not appended after
        # the request URI ("https://host/path:8000" was being generated).
        return 301 https://$host:8000$request_uri;
    }
    # https://serverfault.com/questions/10854/nginx-https-serving-with-same-config-as-http
    #http://nginx.org/en/docs/http/configuring_https_servers.html#single_http_https_server
}
# conf.d/dev.conf (HTTPS half): terminates TLS with a self-signed cert, then
# proxies / to the webpack dev server and /api to the Go API upstream.
server {
# https
listen 443 ssl;
# server_name _;
server_name localhost;
# location of the self-signed SSL certificate
ssl_certificate /usr/share/ssl_certs/cert.pem;
ssl_certificate_key /usr/share/ssl_certs/key.pem;
location / {
# NOTE(review): plain http:// upstream; if devServer.https is enabled the
# dev server itself speaks TLS and this must become https:// -- only one
# end should terminate TLS.
proxy_pass http://webpackdevserver:8081;
}
location /api {
# proxy to golang API
proxy_pass http://goapi;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
# root /usr/share/nginx/html;
}
}
dev/nginx.conf
# dev/nginx.conf -- top-level nginx config: owns the single http{} context and
# pulls the per-site files in from conf.d/.
user nginx;
worker_processes 2;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
# Every *.conf under conf.d/ is loaded inside this http{} context, which is
# why those files must not open an http{} block of their own.
include /etc/nginx/conf.d/*.conf;
}
Any help will be greeeatly appreciated !!!
There appear to be a few issues with your configuration-
Are you intending to connect to your application via HTTPS? If so,
it doesn't look like you have Nginx configured for HTTPS - i.e., no
certificates, etc. For a good summary on how to do this, check the
Nginx docs
You're attempting to connect to your application via
port 8000, but Nginx isn't set up to listen on that port. You only
have it listening on port 80.
Try resolving these first, then update us with the results.
I have the following configuration for Varnish, but when I access the application it doesn't ask for a login — it just logs me in.
What am I doing wrong?
default.vcl
# default.vcl -- NOTE(review): this is Varnish 3.x syntax (req.request,
# vcl_fetch, return (lookup)); Varnish 4+ renamed these to req.method,
# vcl_backend_response and return (hash) -- confirm the installed version.
backend default {
.host = "127.0.0.1";
.port = "80";
}
sub vcl_recv {
# Never cache auth endpoints or state-changing methods.
if(req.url ~ "sign_in" || req.url ~ "sign_out" || req.request == "POST" || req.request == "PUT" || req.request == "DELETE") {
return (pass);
}
return (lookup);
}
sub vcl_fetch {
if(req.url ~ "logout" || req.url ~ "sign_out"){
unset beresp.http.Set-Cookie;
}
# NOTE(review): stripping Set-Cookie on EVERY GET also strips the cookie
# reset the backend sends on logout, so the session cookie survives in the
# browser and users stay logged in.
if (req.request == "GET") {
unset beresp.http.Set-Cookie;
set beresp.ttl = 360m;
}
# Cache static asset paths for 6 hours.
if (req.url ~ "images/" || req.url ~ "javascripts" || req.url ~ "stylesheets" || req.url ~ "assets"){
set beresp.ttl = 360m;
}
}
/etc/default/varnish
# Varnish daemon options: listen on the public interface :80, admin CLI on
# localhost:6082, load default.vcl, authenticate the CLI with the shared
# secret file, and use a 256 MB in-memory (malloc) cache.
DAEMON_OPTS="-a 192.241.136.37:80 \
-T localhost:6082 \
-f /etc/varnish/default.vcl \
-S /etc/varnish/secret \
-s malloc,256m"
/etc/nginx/sites-enabled/default
# /etc/nginx/sites-enabled/default -- Rails/unicorn app behind Varnish:
# Varnish listens on the public :80 and uses this nginx (127.0.0.1:80) as its
# backend; nginx hands dynamic requests to unicorn over a unix socket.
upstream app {
    server unix:/tmp/unicorn.socket fail_timeout=0;
}

server {
    listen 80;
    client_max_body_size 2G;
    server_name localhost;
    keepalive_timeout 5;
    root /home/deploy/apps/wms/current/public;
    access_log off;
    error_log off;

    # Reject unexpected HTTP methods outright.
    if ($request_method !~ ^(GET|HEAD|PUT|POST|DELETE|OPTIONS)$ ){
        return 405;
    }

    # Precompressed, immutable assets.
    location ~ ^/(assets)/ {
        gzip_static on;
        expires max;
        add_header Cache-Control public;
    }

    location / {
        # Fixed: the named-location reference was "#app", which nginx parses
        # as the start of a comment (the "@" was mangled to "#" in the paste).
        try_files $uri/index.html $uri.html $uri @app;
        error_page 404 /404.html;
        error_page 422 /422.html;
        error_page 500 502 503 504 /500.html;
        error_page 403 /403.html;
    }

    # Dynamic requests go to unicorn.
    location @app {
        proxy_pass http://app;
    }

    location = /favicon.ico {
        expires max;
        add_header Cache-Control public;
    }

    # This app serves no PHP; refuse any .php request.
    location ~ \.php$ {
        deny all;
    }
}
You are preventing your backend from deleting your session cookie, so you can't log out unless you explicitly delete your browser's cookies.
Looking at your fetch VCL (Comment inline):
# The poster's vcl_fetch, annotated: the blanket Set-Cookie unsets are what
# keep the session cookie alive in the browser after logout.
sub vcl_fetch {
# This prevents server from deleting the cookie in the browser when loging out
if(req.url ~ "logout" || req.url ~ "sign_out"){
unset beresp.http.Set-Cookie;
}
# NOTE(review): this branch also swallows the logout cookie reset for any
# GET-based logout, with the same effect as the block above.
if (req.request == "GET") {
unset beresp.http.Set-Cookie;
set beresp.ttl = 360m;
}
if (req.url ~ "images/" || req.url ~ "javascripts" || req.url ~ "stylesheets" || req.url ~ "assets"){
set beresp.ttl = 360m;
}
}
So your backend can't delete the client's cookie except as the result of a POST request.
IMHO you shouldn't mess with the backend's Set-Cookie headers unless you know (and test well) the possible side effects.