Node.js + Nginx - what now?

If you want to merge 2 arrays of objects in JavaScript, you can use this one-liner trick:

Array.prototype.push.apply(arr1,arr2);

For example:

var arr1 = [{name: "lang", value: "English"},{name: "age", value: "18"}];
var arr2 = [{name : "childs", value: '5'}, {name: "lang", value: "German"}];

Array.prototype.push.apply(arr1,arr2); 

console.log(arr1);  // final merged result will be in arr1

Output:

[{"name":"lang","value":"English"},
{"name":"age","value":"18"},
{"name":"childs","value":"5"},
{"name":"lang","value":"German"}]
asked by Igor Antun, 17 April 2015 at 23:55

1 answer

The best and simpler setup with Nginx and Node.js is to use Nginx as an HTTP and TCP load balancer with proxy_protocol enabled. In this setup, Nginx proxies incoming requests to Node.js and also lets SSL connections be terminated on the backend Nginx server(s) rather than on the proxy itself (SSL passthrough).

In my opinion, there is no point in providing non-SSL examples, since all web applications are (or should be) running in secure environments.

An example configuration for the proxy server, in /etc/nginx/nginx.conf:

user  nginx;
worker_processes  1;
error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;
events {
    worker_connections  1024;
}
http {
  upstream webserver-http {
    server 192.168.1.4; #use a host port instead if using docker
    server 192.168.1.5; #use a host port instead if using docker
  }
  upstream nodejs-http {
    server 192.168.1.4:8080; #nodejs listening port
    server 192.168.1.5:8080; #nodejs listening port
  }
  server {
    server_name example.com;
    location / {
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
      proxy_set_header Host $http_host;
      proxy_set_header X-Forwarded-Host $server_name;
      proxy_set_header Connection "";
      add_header       X-Upstream $upstream_addr;
      proxy_redirect     off;
      proxy_connect_timeout  300;
      proxy_http_version 1.1;
      proxy_buffers 16 16k;
      proxy_buffer_size 16k;
      proxy_cache_background_update on;
      proxy_pass http://webserver-http$request_uri;
    }
  }
  server {
    server_name node.example.com;
    location / {
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
      proxy_set_header Host $http_host;
      proxy_set_header X-Forwarded-Host $server_name;
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "Upgrade";
      add_header       X-Upstream $upstream_addr;
      proxy_redirect     off;
      proxy_connect_timeout  300;
      proxy_http_version 1.1;
      proxy_buffers 16 16k;
      proxy_buffer_size 16k;
      proxy_cache_background_update on;
      proxy_pass http://nodejs-http$request_uri;
    }
  }
}
stream {
  upstream webserver-https {
    server 192.168.1.4:443; #use a host port instead if using docker
    server 192.168.1.5:443; #use a host port instead if using docker
  }

  server {
    proxy_protocol on;
    tcp_nodelay on;
    listen 443;
    proxy_pass webserver-https;
  }
  log_format proxy 'Protocol: $protocol - $status $bytes_sent $bytes_received $session_time';
  access_log  /var/log/nginx/access.log proxy;
  error_log /var/log/nginx/error.log debug;
}
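
The node.example.com server block above passes the Upgrade and Connection headers through, so WebSocket connections can reach Node.js. As an illustration only (this sketch is not part of the original answer, and the address and port simply mirror the nodejs-http upstream), a backend that completes the WebSocket handshake on its own could look roughly like this:

var http = require('http');
var crypto = require('crypto');

var server = http.createServer(function (req, res) {
    res.writeHead(200, {'Content-Type': 'text/plain'});
    res.end('Plain HTTP response\n');
});

// Fired when the proxied request carries "Connection: Upgrade".
server.on('upgrade', function (req, socket) {
    var key = req.headers['sec-websocket-key'];
    if (!key) {
        socket.end('HTTP/1.1 400 Bad Request\r\n\r\n');
        return;
    }
    // RFC 6455 handshake: SHA-1 of the client key plus the fixed GUID, base64-encoded.
    var accept = crypto.createHash('sha1')
        .update(key + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11')
        .digest('base64');
    socket.write('HTTP/1.1 101 Switching Protocols\r\n' +
                 'Upgrade: websocket\r\n' +
                 'Connection: Upgrade\r\n' +
                 'Sec-WebSocket-Accept: ' + accept + '\r\n\r\n');
    // Actual frame parsing is omitted; in practice a library such as "ws" handles it.
});

server.listen(8080, '192.168.1.4');

In practice you would rarely hand-roll the handshake; the point is only that the proxy configuration above leaves the upgrade semantics intact for the Node.js backend.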

Now, let's handle the backend web server. /etc/nginx/nginx.conf:

user  nginx;
worker_processes  1;
error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;
load_module /etc/nginx/modules/ngx_http_geoip2_module.so; # GeoIP2
events {
    worker_connections  1024;
}
http {
    variables_hash_bucket_size 64;
    variables_hash_max_size 2048;
    server_tokens off;
    sendfile    on;
    tcp_nopush  on;
    tcp_nodelay on;
    autoindex off;
    keepalive_timeout  30;
    types_hash_bucket_size 256;
    client_max_body_size 100m;
    server_names_hash_bucket_size 256;
    include         mime.types;
    default_type    application/octet-stream;
    index  index.php index.html index.htm;
    # GeoIP2
    log_format  main    'Proxy Protocol Address: [$proxy_protocol_addr] '
                        '"$request" $remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';

    # GeoIP2
    log_format  main_geo    'Original Client Address: [$realip_remote_addr]- Proxy Protocol Address: [$proxy_protocol_addr] '
                            'Proxy Protocol Server Address:$proxy_protocol_server_addr - '
                            '"$request" $remote_addr - $remote_user [$time_local] "$request" '
                            '$status $body_bytes_sent "$http_referer" '
                            '$geoip2_data_country_iso $geoip2_data_country_name';

    access_log  /var/log/nginx/access.log  main_geo; # GeoIP2
#===================== GEOIP2 =====================#
    geoip2 /usr/share/geoip/GeoLite2-Country.mmdb {
        $geoip2_metadata_country_build  metadata build_epoch;
        $geoip2_data_country_geonameid  country geoname_id;
        $geoip2_data_country_iso        country iso_code;
        $geoip2_data_country_name       country names en;
        $geoip2_data_country_is_eu      country is_in_european_union;
    }
    #geoip2 /usr/share/geoip/GeoLite2-City.mmdb {
    #   $geoip2_data_city_name city names en;
    #   $geoip2_data_city_geonameid city geoname_id;
    #   $geoip2_data_continent_code continent code;
    #   $geoip2_data_continent_geonameid continent geoname_id;
    #   $geoip2_data_continent_name continent names en;
    #   $geoip2_data_location_accuracyradius location accuracy_radius;
    #   $geoip2_data_location_latitude location latitude;
    #   $geoip2_data_location_longitude location longitude;
    #   $geoip2_data_location_metrocode location metro_code;
    #   $geoip2_data_location_timezone location time_zone;
    #   $geoip2_data_postal_code postal code;
    #   $geoip2_data_rcountry_geonameid registered_country geoname_id;
    #   $geoip2_data_rcountry_iso registered_country iso_code;
    #   $geoip2_data_rcountry_name registered_country names en;
    #   $geoip2_data_rcountry_is_eu registered_country is_in_european_union;
    #   $geoip2_data_region_geonameid subdivisions 0 geoname_id;
    #   $geoip2_data_region_iso subdivisions 0 iso_code;
    #   $geoip2_data_region_name subdivisions 0 names en;
   #}

#=================Basic Compression=================#
    gzip on;
    gzip_disable "msie6";
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_types text/css text/xml text/plain application/javascript image/jpeg image/png image/gif image/x-icon image/svg+xml image/webp application/font-woff application/json application/vnd.ms-fontobject application/vnd.ms-powerpoint;
    gzip_static on;

    include /etc/nginx/sites-enabled/example.com-https.conf;
}

Now, let's set up the virtual host with SSL and proxy_protocol enabled in /etc/nginx/sites-available/example.com-https.conf:

server {
    real_ip_header proxy_protocol;
    set_real_ip_from 192.168.1.1; #proxy server ip address
    #set_real_ip_from proxy; #proxy container hostname if you are using docker
    server_name 192.168.1.4; #Your current server ip address. It will redirect to the domain name.
    listen 80;
    listen 443 ssl http2;
    listen [::]:80;
    listen [::]:443 ssl http2;
    ssl_certificate     /etc/nginx/certs/example.com.crt;
    ssl_certificate_key /etc/nginx/certs/example.com.key;
    ssl_dhparam /etc/nginx/ssl/dhparam.pem;
    return 301 https://example.com$request_uri;
}
server {
    real_ip_header proxy_protocol;
    set_real_ip_from 192.168.1.1; #proxy server ip address
    #set_real_ip_from proxy; #proxy container hostname if you are using docker
    server_name  example.com;
    listen       *:80;
    return 301   https://example.com$request_uri;
}
server {
    real_ip_header proxy_protocol;
    set_real_ip_from 192.168.1.1; #proxy server ip address
    #set_real_ip_from proxy; #proxy container hostname if you are using docker
    server_name www.example.com;
    listen 80;
    listen 443 ssl http2;
    listen [::]:80;
    listen [::]:443 ssl http2;
    ssl_certificate     /etc/nginx/certs/example.com.crt;
    ssl_certificate_key /etc/nginx/certs/example.com.key;
    ssl_dhparam /etc/nginx/ssl/dhparam.pem;
    return 301 https://example.com$request_uri;
}
server {
    real_ip_header proxy_protocol;
    set_real_ip_from 192.168.1.1; #proxy server ip address
    #set_real_ip_from proxy; #proxy container hostname if you are using docker
    server_name example.com;
    listen 443 proxy_protocol ssl http2;
    listen [::]:443 proxy_protocol ssl http2;
    root /var/www/html;
    charset UTF-8;
    add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload';
    add_header X-Frame-Options SAMEORIGIN;
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection "1; mode=block";
    add_header Referrer-Policy no-referrer;
    ssl_prefer_server_ciphers on;
    ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
    ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
    ssl_session_cache   shared:SSL:10m;
    ssl_session_timeout 10m;
    keepalive_timeout   70;
    ssl_buffer_size 1400;
    ssl_dhparam /etc/nginx/ssl/dhparam.pem;
    ssl_stapling on;
    ssl_stapling_verify on;
    resolver 8.8.8.8 8.8.4.4 valid=86400;
    resolver_timeout 10;
    ssl_certificate     /etc/nginx/certs/example.com.crt;
    ssl_certificate_key /etc/nginx/certs/example.com.key;
    ssl_trusted_certificate /etc/nginx/certs/example.com.crt;
location ~* \.(jpg|jpe?g|gif|png|ico|cur|gz|svgz|mp4|ogg|ogv|webm|htc|css|js|otf|eot|svg|ttf|woff|woff2)(\?ver=[0-9.]+)?$ {
    expires modified 1M;
    add_header Access-Control-Allow-Origin '*';
    add_header Pragma public;
    add_header Cache-Control "public, must-revalidate, proxy-revalidate";
    access_log off;
    }
    location ~ /.well-known { #For issuing LetsEncrypt Certificates
        allow all;
    }
location / {
    index index.php;
    try_files $uri $uri/ /index.php?$args;
    }
error_page  404    /404.php;

location ~ \.php$ {
    try_files       $uri =404;
    fastcgi_index   index.php;
    fastcgi_pass    unix:/tmp/php7-fpm.sock;
    #fastcgi_pass    php-container-hostname:9000; (if using docker)
    fastcgi_pass_request_headers on;
    fastcgi_split_path_info ^(.+\.php)(/.+)$;
    fastcgi_param   SCRIPT_FILENAME  $document_root$fastcgi_script_name;
    fastcgi_intercept_errors on;
    fastcgi_ignore_client_abort off;
    fastcgi_connect_timeout 60;
    fastcgi_send_timeout 180;
    fastcgi_read_timeout 180;
    fastcgi_request_buffering on;
    fastcgi_buffer_size 128k;
    fastcgi_buffers 4 256k;
    fastcgi_busy_buffers_size 256k;
    fastcgi_temp_file_write_size 256k;
    include fastcgi_params;
}
location = /robots.txt {
    access_log off;
    log_not_found off;
    }
location ~ /\. {
    deny  all;
    access_log off;
    log_not_found off;
    }
}

And finally, a sample of the 2 Node.js web servers. First server:

var http = require('http');

http.createServer(function (req, res) {
    res.writeHead(200, {'Content-Type': 'text/plain'});
    res.end('Hello From Nodejs\n');
}).listen(8080, "192.168.1.4");
console.log('Server running at http://192.168.1.4:8080/');

Second server:

var http = require('http');

http.createServer(function (req, res) {
    res.writeHead(200, {'Content-Type': 'text/plain'});
    res.end('Hello From Nodejs\n');
}).listen(8080, "192.168.1.5");
console.log('Server running at http://192.168.1.5:8080/');
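
Because the proxy's location blocks set X-Real-IP and X-Forwarded-For, either backend can recover the original client address instead of seeing only the proxy's IP. A minimal sketch (my addition, not part of the original answer; the bind address mirrors the first server):

var http = require('http');

http.createServer(function (req, res) {
    // Headers set by the Nginx proxy above; fall back to the socket address
    // when the server is reached directly.
    var clientIp = req.headers['x-real-ip'] ||
                   (req.headers['x-forwarded-for'] || '').split(',')[0].trim() ||
                   req.socket.remoteAddress;
    res.writeHead(200, {'Content-Type': 'text/plain'});
    res.end('Hello From Nodejs, client IP: ' + clientIp + '\n');
}).listen(8080, "192.168.1.4");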

Now everything should be fully working and load balanced.

A while ago I wrote about how to set up Nginx as a TCP load balancer in Docker. Check it out if you are using Docker.

answered 19 December 2019 at 20:20