
Why is nginx so picky about media files?

Hello, Habr!
There is a site, 132619.simplecloud.club, whose first slide is supposed to show a video, but the nginx error log keeps reporting:
open() "/var/www/application/current/public/assets/video.jpg" failed (2: No such file or directory), client: 2323, server: _, request: "GET /asset$...
 open() "/var/www/application/current/public/assets/video.webm" failed (2: No such file or directory), client: 32323, server: _, request: "GET /asse$..
 open() "/var/www/application/current/public/assets/assets/video.jpg" failed (2: No such file or directory), client: 324343...

I checked the file permissions; they are fine, exactly the same as on the other files.
Other images sit right in the same directory and are served without any problems.
Here is the nginx config, with PageSpeed:
user nginx web;

pid /var/www/run/nginx.pid;
error_log /var/www/log/nginx.error.log;

events {
  worker_connections 1024; # increase if you have lots of clients
  accept_mutex off; # "on" if nginx worker_processes > 1
  use epoll; # enable for Linux 2.6+
  # use kqueue; # enable for FreeBSD, OSX
}

http {
  # nginx will find this file in the config directory set at nginx build time
  include mime.types;
  types_hash_max_size 2048;
  server_names_hash_bucket_size 64;
  # fallback in case we can't determine a type
  default_type application/octet-stream;

  # click tracking!
  access_log /var/www/log/nginx.access.log combined;

  # you generally want to serve static files with nginx since neither
  # Unicorn nor Rainbows! is optimized for it at the moment
  sendfile on;

  tcp_nopush on; # off may be better for *some* Comet/long-poll stuff
  tcp_nodelay off; # on may be better for some Comet/long-poll stuff

  # we haven't checked to see if Rack::Deflate on the app server is
  # faster or not than doing compression via nginx.  It's easier
  # to configure it all in one place here for static files and also
  # to disable gzip for clients who don't get gzip/deflate right.
  # There are other gzip settings that may be needed to deal with
  # bad clients out there, see http://wiki.nginx.org/NginxHttpGzipModule
  gzip on;
  gzip_http_version 1.0;
  gzip_proxied any;
  gzip_min_length 0;
  gzip_vary on;
  gzip_disable "MSIE [1-6]\.";
  gzip_comp_level 9;
  gzip_types text/plain text/xml text/css
             text/comma-separated-values
             text/javascript application/x-javascript
             application/atom+xml;

  # this can be any application server, not just Unicorn/Rainbows!
  upstream app_server {
    server unix:/var/www/application/current/tmp/sockets/.unicorn.sock fail_timeout=0;
  }

  server {
    # PageSpeed
    pagespeed on;
    pagespeed FileCachePath /var/ngx_pagespeed_cache;
    location ~ "\.pagespeed\.([a-z]\.)?[a-z]{2}\.[^.]{10}\.[^.]+" {
      add_header "" "";
    }
    location ~ "^/ngx_pagespeed_static/" { }
    location ~ "^/ngx_pagespeed_beacon$" { }
    location /ngx_pagespeed_statistics {
      allow 127.0.0.1; allow 5.228.169.73; deny all;
    }
    location /ngx_pagespeed_global_statistics {
      allow 127.0.0.1; allow 5.228.169.73; deny all;
    }
    pagespeed MessageBufferSize 100000;
    location /ngx_pagespeed_message {
      allow 127.0.0.1; allow 5.228.169.73; deny all;
    }
    location /pagespeed_console {
      allow 127.0.0.1; allow 5.228.169.73; deny all;
    }

    charset utf-8;
    # enable one of the following if you're on Linux or FreeBSD
    listen 80 default deferred; # for Linux
    # listen 80 default accept_filter=httpready; # for FreeBSD

    # If you have IPv6, you'll likely want to have two separate listeners.
    # One on IPv4 only (the default), and another on IPv6 only instead
    # of a single dual-stack listener.  A dual-stack listener will make
    # for ugly IPv4 addresses in $remote_addr (e.g. "::ffff:10.0.0.1"
    # instead of just "10.0.0.1") and potentially trigger bugs in
    # some software.
    # listen [::]:80 ipv6only=on; # deferred or accept_filter recommended

    client_max_body_size 4G;
    server_name _;

    # ~2 seconds is often enough for most folks to parse HTML/CSS and
    # retrieve needed images/icons/frames, connections are cheap in
    #   nginx so increasing this is generally safe...
    keepalive_timeout 5;

    # path for static files
    root /var/www/application/current/public;

    # Prefer to serve static files directly from nginx to avoid unnecessary
    # data copies from the application server.
    #
    # try_files directive appeared in nginx 0.7.27 and has stabilized
    # over time.  Older versions of nginx (e.g. 0.6.x) required
    # "if (!-f $request_filename)" which was less efficient:
    # http://bogomips.org/unicorn.git/tree/examples/nginx.conf?id=v3.3.1#n127
    try_files $uri/index.html $uri.html $uri @app;

    location ~ ^/(assets)/  {
      root /var/www/application/current/public;

      expires max;
      add_header Cache-Control public;
    }
    location @app {
      # an HTTP header important enough to have its own Wikipedia entry:
      #   http://en.wikipedia.org/wiki/X-Forwarded-For
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

      # enable this if you forward HTTPS traffic to unicorn,
      # this helps Rack set the proper URL scheme for doing redirects:
      # proxy_set_header X-Forwarded-Proto $scheme;

      # pass the Host: header from the client right along so redirects
      # can be set properly within the Rack application
      proxy_set_header Host $http_host;

      # we don't want nginx trying to do something clever with
      # redirects, we set the Host: header above already.
      proxy_redirect off;

      # set "proxy_buffering off" *only* for Rainbows! when doing
      # Comet/long-poll/streaming.  It's also safe to set if you're only
      # serving fast clients with Unicorn + nginx, but not slow
      # clients.  You normally want nginx to buffer responses to slow
      # clients, even with Rails 3.1 streaming because otherwise a slow
      # client can become a bottleneck of Unicorn.
      #
      # The Rack application may also set "X-Accel-Buffering (yes|no)"
      # in the response headers to disable/enable buffering on a
      # per-response basis.
      # proxy_buffering off;

      proxy_pass http://app_server;
    }

    # Rails error pages
    error_page 500 502 503 504 /500.html;
    location = /500.html {
      root /var/www/application/current/public;
    }
  }
}


I don't really know nginx at all; I tried googling and figuring it out myself, but never really understood it or found anything useful.
Solution to the question (1)
zaurius
@zaurius, question author
I output the video tags through the video_tag helper, and everything started working.
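For context, here is a minimal sketch of what that fix looks like, assuming an ERB view and that video.webm / video.jpg live under app/assets/; apart from the helper names and the file names from the error log, the snippet is illustrative, not taken from the actual project.

<!-- Hard-coded paths: in production the Rails asset pipeline precompiles assets
     under fingerprinted names (e.g. video-<digest>.webm), so a literal
     /assets/video.webm usually does not exist on disk and nginx reports
     "No such file or directory". -->
<video poster="/assets/video.jpg">
  <source src="/assets/video.webm" type="video/webm">
</video>

<!-- The helper emits the compiled, fingerprinted URL instead; the :poster
     option is resolved through the asset pipeline as well. -->
<%= video_tag "video.webm", poster: "video.jpg" %>

With the fingerprinted URLs in the markup, the existing location ~ ^/(assets)/ block in the config above serves the files directly.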