feat: hot reload (#2031)

This patch brings hot reloading capabilities to PHP apps: in
development, the browser will automatically refresh the page when any
source file changes!
It's similar to HMR in JavaScript.

It is built on top of [the watcher
mechanism](https://frankenphp.dev/docs/config/#watching-for-file-changes)
and of the [Mercure](https://frankenphp.dev/docs/mercure/) integration.

Each time a watched file is modified, a Mercure update is sent, allowing
the client to reload the page, or part of it
(assets, images, ...).

Here is an example implementation:

```caddyfile
root ./public


mercure {
      subscriber_jwt {env.MERCURE_SUBSCRIBER_JWT_KEY}
      anonymous
}

php_server {
      hot_reload
}
```

```php
<?php
header('Content-Type: text/html');
?>
<!DOCTYPE html>
<html lang="en">
<head>
<title>Test</title>
<script>
    const es = new EventSource('<?=$_SERVER['FRANKENPHP_HOT_RELOAD']?>');
    es.onmessage = () => location.reload();
</script>
</head>
<body>
Hello
```

I plan to create a helper JS library to handle more advanced cases
(reloading CSS, JS, etc.), similar to [HotWire
Spark](https://github.com/hotwired/spark). Be sure to attend my
SymfonyCon talk to learn more!

There is still room for improvement:

- Provide an option to only trigger the update without reloading the
worker for some files (e.g. images, JS, CSS...)
- Support classic mode (currently, only the worker mode is supported)
- Don't reload all workers when only the files used by one change

However, this PR is working as-is and can be merged as a first step.

This patch heavily refactors the watcher module. Maybe it will be
possible to extract it as a standalone library at some point (it would be
useful for adding a similar feature not tied to PHP as a Caddy module).

---------

Signed-off-by: Kévin Dunglas <kevin@dunglas.fr>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
Kévin Dunglas
2025-12-12 14:29:18 +01:00
committed by GitHub
parent d2007620a4
commit 225ca409d3
40 changed files with 1247 additions and 671 deletions

View File

@@ -4,12 +4,13 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/dunglas/frankenphp/internal/fastabs"
"io"
"net/http"
"sync"
"testing"
"github.com/dunglas/frankenphp/internal/fastabs"
"github.com/caddyserver/caddy/v2/caddytest"
"github.com/dunglas/frankenphp"
"github.com/stretchr/testify/assert"

View File

@@ -49,13 +49,14 @@ type FrankenPHPApp struct {
NumThreads int `json:"num_threads,omitempty"`
// MaxThreads limits how many threads can be started at runtime. Default 2x NumThreads
MaxThreads int `json:"max_threads,omitempty"`
// Workers configures the worker scripts to start.
// Workers configures the worker scripts to start
Workers []workerConfig `json:"workers,omitempty"`
// Overwrites the default php ini configuration
PhpIni map[string]string `json:"php_ini,omitempty"`
// The maximum amount of time a request may be stalled waiting for a thread
MaxWaitTime time.Duration `json:"max_wait_time,omitempty"`
opts []frankenphp.Option
metrics frankenphp.Metrics
ctx context.Context
logger *slog.Logger
@@ -76,6 +77,9 @@ func (f *FrankenPHPApp) Provision(ctx caddy.Context) error {
f.ctx = ctx
f.logger = ctx.Slogger()
// We have at least 7 hardcoded options
f.opts = make([]frankenphp.Option, 0, 7+len(options))
if httpApp, err := ctx.AppIfConfigured("http"); err == nil {
if httpApp.(*caddyhttp.App).Metrics != nil {
f.metrics = frankenphp.NewPrometheusMetrics(ctx.GetMetricsRegistry())
@@ -135,11 +139,10 @@ func (f *FrankenPHPApp) Start() error {
repl := caddy.NewReplacer()
optionsMU.RLock()
opts := make([]frankenphp.Option, 0, len(options)+len(f.Workers)+7)
opts = append(opts, options...)
f.opts = append(f.opts, options...)
optionsMU.RUnlock()
opts = append(opts,
f.opts = append(f.opts,
frankenphp.WithContext(f.ctx),
frankenphp.WithLogger(f.logger),
frankenphp.WithNumThreads(f.NumThreads),
@@ -150,31 +153,19 @@ func (f *FrankenPHPApp) Start() error {
)
for _, w := range f.Workers {
workerOpts := make([]frankenphp.WorkerOption, 0, len(w.requestOptions)+4)
w.options = append(w.options,
frankenphp.WithWorkerEnv(w.Env),
frankenphp.WithWorkerWatchMode(w.Watch),
frankenphp.WithWorkerMaxFailures(w.MaxConsecutiveFailures),
frankenphp.WithWorkerMaxThreads(w.MaxThreads),
frankenphp.WithWorkerRequestOptions(w.requestOptions...),
)
if w.requestOptions == nil {
workerOpts = append(workerOpts,
frankenphp.WithWorkerEnv(w.Env),
frankenphp.WithWorkerWatchMode(w.Watch),
frankenphp.WithWorkerMaxFailures(w.MaxConsecutiveFailures),
frankenphp.WithWorkerMaxThreads(w.MaxThreads),
)
} else {
workerOpts = append(
workerOpts,
frankenphp.WithWorkerEnv(w.Env),
frankenphp.WithWorkerWatchMode(w.Watch),
frankenphp.WithWorkerMaxFailures(w.MaxConsecutiveFailures),
frankenphp.WithWorkerMaxThreads(w.MaxThreads),
frankenphp.WithWorkerRequestOptions(w.requestOptions...),
)
}
opts = append(opts, frankenphp.WithWorkers(w.Name, repl.ReplaceKnown(w.FileName, ""), w.Num, workerOpts...))
f.opts = append(f.opts, frankenphp.WithWorkers(w.Name, repl.ReplaceKnown(w.FileName, ""), w.Num, w.options...))
}
frankenphp.Shutdown()
if err := frankenphp.Init(opts...); err != nil {
if err := frankenphp.Init(f.opts...); err != nil {
return err
}
@@ -288,7 +279,7 @@ func (f *FrankenPHPApp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
}
case "worker":
wc, err := parseWorkerConfig(d)
wc, err := unmarshalWorker(d)
if err != nil {
return err
}

View File

@@ -12,7 +12,7 @@ import (
const (
defaultDocumentRoot = "public"
defaultWatchPattern = "./**/*.{php,yaml,yml,twig,env}"
defaultWatchPattern = "./**/*.{env,php,twig,yaml,yml}"
)
func init() {

View File

@@ -1362,7 +1362,7 @@ func TestWorkerMatchDirective(t *testing.T) {
}
`, "caddyfile")
// worker is outside of public directory, match anyways
// worker is outside public directory, match anyway
tester.AssertGetResponse("http://localhost:"+testPort+"/matched-path", http.StatusOK, "requests:1")
tester.AssertGetResponse("http://localhost:"+testPort+"/matched-path/anywhere", http.StatusOK, "requests:2")

View File

@@ -181,7 +181,7 @@ func TestModuleWorkerWithWatchConfiguration(t *testing.T) {
// Verify that the watch directories were set correctly
require.Len(t, module.Workers[0].Watch, 3, "Expected three watch patterns")
require.Equal(t, "./**/*.{php,yaml,yml,twig,env}", module.Workers[0].Watch[0], "First watch pattern should be the default")
require.Equal(t, defaultWatchPattern, module.Workers[0].Watch[0], "First watch pattern should be the default")
require.Equal(t, "./src/**/*.php", module.Workers[0].Watch[1], "Second watch pattern should match the configuration")
require.Equal(t, "./config/**/*.yaml", module.Workers[0].Watch[2], "Third watch pattern should match the configuration")
}

View File

@@ -1,6 +1,6 @@
module github.com/dunglas/frankenphp/caddy
go 1.25.0
go 1.25.4
replace github.com/dunglas/frankenphp => ../
@@ -11,6 +11,7 @@ require (
github.com/caddyserver/certmagic v0.25.0
github.com/dunglas/caddy-cbrotli v1.0.1
github.com/dunglas/frankenphp v1.10.1
github.com/dunglas/mercure v0.21.2
github.com/dunglas/mercure/caddy v0.21.2
github.com/dunglas/vulcain/caddy v1.2.1
github.com/prometheus/client_golang v1.23.2
@@ -59,10 +60,10 @@ require (
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect
github.com/dlclark/regexp2 v1.11.5 // indirect
github.com/dunglas/httpsfv v1.1.0 // indirect
github.com/dunglas/mercure v0.21.2 // indirect
github.com/dunglas/skipfilter v1.0.0 // indirect
github.com/dunglas/vulcain v1.2.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/e-dant/watcher/watcher-go v0.0.0-20251208164151-f88ec3b7e146 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect

View File

@@ -159,6 +159,8 @@ github.com/dunglas/vulcain/caddy v1.2.1/go.mod h1:8QrmLTfURmW2VgjTR6Gb9a53FrZjsp
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/e-dant/watcher/watcher-go v0.0.0-20251208164151-f88ec3b7e146 h1:h3vVM6X45PK0mAk8NqiYNQGXTyhvXy1HQ5GhuQN4eeA=
github.com/e-dant/watcher/watcher-go v0.0.0-20251208164151-f88ec3b7e146/go.mod h1:sVUOkwtftoj71nnJRG2S0oWNfXFdKpz/M9vK0z06nmM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=

20
caddy/hotreload-skip.go Normal file
View File

@@ -0,0 +1,20 @@
//go:build nowatcher || nomercure
package caddy
import (
"errors"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
// hotReloadContext is the no-op variant embedded in FrankenPHPModule when
// hot reload support is compiled out (nowatcher or nomercure build tag).
// It carries no configuration, keeping the module struct layout valid.
type hotReloadContext struct {
}
// configureHotReload is a no-op: hot reload support is compiled out by
// the nowatcher/nomercure build tags, so there is nothing to wire up.
func (_ *FrankenPHPModule) configureHotReload(_ *FrankenPHPApp) error {
	return nil
}
// unmarshalHotReload rejects the hot_reload Caddyfile directive when
// support is compiled out, so a misconfigured build fails loudly at
// config-load time instead of silently ignoring the directive.
// NOTE(review): the parameter type is *caddyfile.Dispenser but the import
// block visible in this file only lists errors and caddyhttp — confirm
// that caddyfile is actually imported (and caddyhttp actually used).
func (_ *FrankenPHPModule) unmarshalHotReload(d *caddyfile.Dispenser) error {
	return errors.New("hot reload support disabled")
}

111
caddy/hotreload.go Normal file
View File

@@ -0,0 +1,111 @@
//go:build !nowatcher && !nomercure
package caddy
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"hash/fnv"
"net/url"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/dunglas/frankenphp"
)
// defaultHotReloadPattern is watched when hot_reload is enabled without
// explicit patterns: PHP/template sources plus common static assets.
const defaultHotReloadPattern = "./**/*.{css,env,gif,htm,html,jpg,jpeg,js,mjs,php,png,svg,twig,webp,xml,yaml,yml}"

// hotReloadContext is embedded in FrankenPHPModule when the watcher and
// Mercure integrations are compiled in (see hotreload-skip.go for the
// disabled variant).
type hotReloadContext struct {
	// HotReload specifies files to watch for file changes to trigger hot reload updates. Supports the glob syntax.
	HotReload *hotReloadConfig `json:"hot_reload,omitempty"`
}

// hotReloadConfig holds the hot reload settings parsed from the
// Caddyfile or JSON configuration.
type hotReloadConfig struct {
	// Topic is the Mercure topic updates are published on; when empty, a
	// topic is derived from the module configuration (see configureHotReload).
	Topic string `json:"topic"`
	// Watch lists the glob patterns of files whose changes trigger an update.
	Watch []string `json:"watch"`
}
// configureHotReload wires hot reloading into the app: it requires a
// configured Mercure hub, fills in the default watch patterns and topic,
// registers the frankenphp WithHotReload option on the app, and exposes
// the Mercure subscription URL to PHP scripts through the
// FRANKENPHP_HOT_RELOAD environment variable.
// It is a no-op when hot_reload is not configured.
func (f *FrankenPHPModule) configureHotReload(app *FrankenPHPApp) error {
	if f.HotReload == nil {
		return nil
	}

	if f.mercureHub == nil {
		return errors.New("unable to enable hot reloading: no Mercure hub configured")
	}

	if len(f.HotReload.Watch) == 0 {
		f.HotReload.Watch = []string{defaultHotReloadPattern}
	}

	if f.HotReload.Topic == "" {
		// derive a stable topic from a hash of the module config so the same
		// configuration always yields the same topic
		uid, err := uniqueID(f)
		if err != nil {
			return err
		}

		f.HotReload.Topic = "https://frankenphp.dev/hot-reload/" + uid
	}

	app.opts = append(app.opts, frankenphp.WithHotReload(f.HotReload.Topic, f.mercureHub, f.HotReload.Watch))
	// NOTE(review): the "\x00" key suffix appears to mark the entry as
	// already prepared for frankenphp's env handling — confirm against the
	// preparedEnv contract
	f.preparedEnv["FRANKENPHP_HOT_RELOAD\x00"] = "/.well-known/mercure?topic=" + url.QueryEscape(f.HotReload.Topic)

	return nil
}
// unmarshalHotReload parses the hot_reload Caddyfile directive:
//
//	hot_reload [<patterns...>] {
//	    topic <topic>
//	    watch <patterns...>
//	}
//
// A bare "hot_reload" (no arguments, no block) enables the feature with
// its defaults, as shown in the PR example. The previous implementation
// only allocated f.HotReload when arguments or sub-directives were
// present, so a bare directive silently left hot reloading disabled.
func (f *FrankenPHPModule) unmarshalHotReload(d *caddyfile.Dispenser) error {
	// the directive's presence alone must enable hot reloading
	if f.HotReload == nil {
		f.HotReload = &hotReloadConfig{}
	}

	// inline arguments are shorthand for watch patterns
	f.HotReload.Watch = append(f.HotReload.Watch, d.RemainingArgs()...)

	for d.NextBlock(1) {
		switch v := d.Val(); v {
		case "topic":
			if !d.NextArg() {
				return d.ArgErr()
			}

			f.HotReload.Topic = d.Val()
		case "watch":
			patterns := d.RemainingArgs()
			if len(patterns) == 0 {
				return d.ArgErr()
			}

			f.HotReload.Watch = append(f.HotReload.Watch, patterns...)
		default:
			return wrongSubDirectiveError("hot_reload", "topic, watch", v)
		}
	}

	return nil
}
// uniqueID derives a stable 16-character hexadecimal identifier from any
// gob-encodable value: identical inputs always map to the same ID. It is
// used to build a default hot reload topic from the module configuration.
func uniqueID(s any) (string, error) {
	var encoded bytes.Buffer
	if err := gob.NewEncoder(&encoded).Encode(s); err != nil {
		return "", fmt.Errorf("unable to generate unique name: %w", err)
	}

	hasher := fnv.New64a()
	if _, err := hasher.Write(encoded.Bytes()); err != nil {
		return "", fmt.Errorf("unable to generate unique name: %w", err)
	}

	return fmt.Sprintf("%016x", hasher.Sum64()), nil
}

88
caddy/hotreload_test.go Normal file
View File

@@ -0,0 +1,88 @@
//go:build !nowatcher && !nomercure
package caddy_test
import (
"context"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"github.com/caddyserver/caddy/v2/caddytest"
"github.com/stretchr/testify/require"
)
// TestHotReload subscribes to the hot reload Mercure topic, touches a
// watched PHP file, and asserts that an update naming the file is pushed
// to the subscriber, and that FRANKENPHP_HOT_RELOAD exposes the
// subscription URL to PHP.
func TestHotReload(t *testing.T) {
	const topic = "https://frankenphp.dev/hot-reload/test"
	u := "/.well-known/mercure?topic=" + url.QueryEscape(topic)

	tmpDir := t.TempDir()
	indexFile := filepath.Join(tmpDir, "index.php")

	tester := caddytest.NewTester(t)
	tester.InitServer(`
	{
		debug
		skip_install_trust
		admin localhost:2999
	}

	http://localhost:`+testPort+` {
		mercure {
			transport local
			subscriber_jwt TestKey
			anonymous
		}

		php_server {
			root `+tmpDir+`
			hot_reload {
				topic `+topic+`
				watch `+tmpDir+`/*.php
			}
		}
	}
	`, "caddyfile")

	var connected, received sync.WaitGroup
	connected.Add(1)
	received.Go(func() {
		cx, cancel := context.WithCancel(t.Context())
		req, _ := http.NewRequest(http.MethodGet, "http://localhost:"+testPort+u, nil)
		req = req.WithContext(cx)

		resp := tester.AssertResponseCode(req, http.StatusOK)
		connected.Done()

		var receivedBody strings.Builder
		buf := make([]byte, 1024)
		for {
			n, err := resp.Body.Read(buf)
			// Only append the n bytes actually read: writing the full buffer
			// would copy stale bytes from previous iterations into the body.
			receivedBody.Write(buf[:n])

			if strings.Contains(receivedBody.String(), "index.php") {
				cancel()
				break
			}

			// Per the io.Reader contract, data may be returned alongside an
			// error, so the error is only checked after consuming the bytes.
			require.NoError(t, err)
		}

		require.NoError(t, resp.Body.Close())
	})

	// wait until the SSE subscription is established before touching the file,
	// otherwise the update could be published with no subscriber listening
	connected.Wait()

	require.NoError(t, os.WriteFile(indexFile, []byte("<?=$_SERVER['FRANKENPHP_HOT_RELOAD'];"), 0644))

	received.Wait()

	tester.AssertGetResponse("http://localhost:"+testPort+"/index.php", http.StatusOK, u)
}

View File

@@ -2,9 +2,12 @@
package caddy
import (
"github.com/caddyserver/caddy/v2"
)
func (f *FrankenPHPModule) assignMercureHubRequestOption(_ caddy.Context) {
type mercureContext struct {
}
func (f *FrankenPHPModule) configureHotReload(_ *FrankenPHPApp) error {
return nil
}
func (f *FrankenPHPModule) assignMercureHub(_ caddy.Context) {
}

View File

@@ -5,6 +5,7 @@ package caddy
import (
"github.com/caddyserver/caddy/v2"
"github.com/dunglas/frankenphp"
"github.com/dunglas/mercure"
mercureCaddy "github.com/dunglas/mercure/caddy"
)
@@ -12,9 +13,22 @@ func init() {
mercureCaddy.AllowNoPublish = true
}
func (f *FrankenPHPModule) assignMercureHubRequestOption(ctx caddy.Context) {
if hub := mercureCaddy.FindHub(ctx.Modules()); hub != nil {
opt := frankenphp.WithMercureHub(hub)
f.mercureHubRequestOption = &opt
type mercureContext struct {
mercureHub *mercure.Hub
}
func (f *FrankenPHPModule) assignMercureHub(ctx caddy.Context) {
if f.mercureHub = mercureCaddy.FindHub(ctx.Modules()); f.mercureHub == nil {
return
}
opt := frankenphp.WithMercureHub(f.mercureHub)
f.mercureHubRequestOption = &opt
for i, wc := range f.Workers {
wc.mercureHub = f.mercureHub
wc.options = append(wc.options, frankenphp.WithWorkerMercureHub(wc.mercureHub))
f.Workers[i] = wc
}
}

View File

@@ -33,6 +33,9 @@ var serverHeader = []string{"FrankenPHP Caddy"}
// }
// }
type FrankenPHPModule struct {
mercureContext
hotReloadContext
// Root sets the root folder to the site. Default: `root` directive, or the path of the public directory of the embed app it exists.
Root string `json:"root,omitempty"`
// SplitPath sets the substrings for splitting the URI into two parts. The first matching substring will be used to split the "path info" from the path. The first piece is suffixed with the matching substring and will be assumed as the actual resource (CGI script) name. The second piece will be set to PATH_INFO for the CGI script to use. Default: `.php`.
@@ -74,8 +77,9 @@ func (f *FrankenPHPModule) Provision(ctx caddy.Context) error {
return fmt.Errorf(`expected ctx.App("frankenphp") to return *FrankenPHPApp, got nil`)
}
f.assignMercureHubRequestOption(ctx)
f.assignMercureHub(ctx)
loggerOpt := frankenphp.WithRequestLogger(f.logger)
for i, wc := range f.Workers {
// make the file path absolute from the public directory
// this can only be done if the root is defined inside php_server
@@ -88,11 +92,7 @@ func (f *FrankenPHPModule) Provision(ctx caddy.Context) error {
wc.inheritEnv(f.Env)
}
wc.requestOptions = []frankenphp.RequestOption{frankenphp.WithRequestLogger(f.logger)}
if f.mercureHubRequestOption != nil {
wc.requestOptions = append(wc.requestOptions, *f.mercureHubRequestOption)
}
wc.requestOptions = append(wc.requestOptions, loggerOpt)
f.Workers[i] = wc
}
@@ -106,14 +106,13 @@ func (f *FrankenPHPModule) Provision(ctx caddy.Context) error {
if frankenphp.EmbeddedAppPath == "" {
f.Root = "{http.vars.root}"
} else {
rrs := false
f.Root = filepath.Join(frankenphp.EmbeddedAppPath, defaultDocumentRoot)
var rrs bool
f.ResolveRootSymlink = &rrs
}
} else {
if frankenphp.EmbeddedAppPath != "" && filepath.IsLocal(f.Root) {
f.Root = filepath.Join(frankenphp.EmbeddedAppPath, f.Root)
}
} else if frankenphp.EmbeddedAppPath != "" && filepath.IsLocal(f.Root) {
f.Root = filepath.Join(frankenphp.EmbeddedAppPath, f.Root)
}
if len(f.SplitPath) == 0 {
@@ -154,6 +153,10 @@ func (f *FrankenPHPModule) Provision(ctx caddy.Context) error {
}
}
if err := f.configureHotReload(fapp); err != nil {
return err
}
return nil
}
@@ -164,11 +167,15 @@ func needReplacement(s string) bool {
// ServeHTTP implements caddyhttp.MiddlewareHandler.
func (f *FrankenPHPModule) ServeHTTP(w http.ResponseWriter, r *http.Request, _ caddyhttp.Handler) error {
origReq := r.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
ctx := r.Context()
origReq := ctx.Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
repl := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
var (
documentRootOption frankenphp.RequestOption
documentRoot string
)
var documentRootOption frankenphp.RequestOption
var documentRoot string
if f.resolvedDocumentRoot == "" {
documentRoot = repl.ReplaceKnown(f.Root, "")
if documentRoot == "" && frankenphp.EmbeddedAppPath != "" {
@@ -278,14 +285,20 @@ func (f *FrankenPHPModule) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
f.ResolveRootSymlink = &v
case "worker":
wc, err := parseWorkerConfig(d)
wc, err := unmarshalWorker(d)
if err != nil {
return err
}
f.Workers = append(f.Workers, wc)
case "hot_reload":
if err := f.unmarshalHotReload(d); err != nil {
return err
}
default:
return wrongSubDirectiveError("php or php_server", "root, split, env, resolve_root_symlink, worker", d.Val())
return wrongSubDirectiveError("php or php_server", "hot_reload, name, root, split, env, resolve_root_symlink, worker", d.Val())
}
}
}
@@ -294,7 +307,7 @@ func (f *FrankenPHPModule) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
fileNames := make(map[string]struct{}, len(f.Workers))
for _, w := range f.Workers {
if _, ok := fileNames[w.FileName]; ok {
return fmt.Errorf(`workers in a single "php_server" block must not have duplicate filenames: %q`, w.FileName)
return fmt.Errorf(`workers in a single "php" or "php_server" block must not have duplicate filenames: %q`, w.FileName)
}
if len(w.MatchPath) == 0 {

View File

@@ -22,6 +22,8 @@ import (
// }
// }
type workerConfig struct {
mercureContext
// Name for the worker. Default: the filename for FrankenPHPApp workers, always prefixed with "m#" for FrankenPHPModule workers.
Name string `json:"name,omitempty"`
// FileName sets the path to the worker script.
@@ -39,10 +41,11 @@ type workerConfig struct {
// MaxConsecutiveFailures sets the maximum number of consecutive failures before panicking (defaults to 6, set to -1 to never panick)
MaxConsecutiveFailures int `json:"max_consecutive_failures,omitempty"`
options []frankenphp.WorkerOption
requestOptions []frankenphp.RequestOption
}
func parseWorkerConfig(d *caddyfile.Dispenser) (workerConfig, error) {
func unmarshalWorker(d *caddyfile.Dispenser) (workerConfig, error) {
wc := workerConfig{}
if d.NextArg() {
wc.FileName = d.Val()
@@ -66,8 +69,7 @@ func parseWorkerConfig(d *caddyfile.Dispenser) (workerConfig, error) {
}
for d.NextBlock(1) {
v := d.Val()
switch v {
switch v := d.Val(); v {
case "name":
if !d.NextArg() {
return wc, d.ArgErr()
@@ -110,11 +112,12 @@ func parseWorkerConfig(d *caddyfile.Dispenser) (workerConfig, error) {
}
wc.Env[args[0]] = args[1]
case "watch":
if !d.NextArg() {
patterns := d.RemainingArgs()
if len(patterns) == 0 {
// the default if the watch directory is left empty:
wc.Watch = append(wc.Watch, defaultWatchPattern)
} else {
wc.Watch = append(wc.Watch, d.Val())
wc.Watch = append(wc.Watch, patterns...)
}
case "match":
// provision the path so it's identical to Caddy match rules

View File

@@ -251,6 +251,7 @@ func Init(options ...Option) error {
opt := &opt{}
for _, o := range options {
if err := o(opt); err != nil {
Shutdown()
return err
}
}
@@ -277,6 +278,7 @@ func Init(options ...Option) error {
workerThreadCount, err := calculateMaxThreads(opt)
if err != nil {
Shutdown()
return err
}
@@ -285,6 +287,7 @@ func Init(options ...Option) error {
config := Config()
if config.Version.MajorVersion < 8 || (config.Version.MajorVersion == 8 && config.Version.MinorVersion < 2) {
Shutdown()
return ErrInvalidPHPVersion
}
@@ -304,6 +307,7 @@ func Init(options ...Option) error {
mainThread, err := initPHPThreads(opt.numThreads, opt.maxThreads, opt.phpIni)
if err != nil {
Shutdown()
return err
}
@@ -314,6 +318,13 @@ func Init(options ...Option) error {
}
if err := initWorkers(opt.workers); err != nil {
Shutdown()
return err
}
if err := initWatchers(opt); err != nil {
Shutdown()
return err
}
@@ -352,7 +363,7 @@ func Shutdown() {
fn()
}
drainWatcher()
drainWatchers()
drainAutoScaling()
drainPHPThreads()
@@ -739,5 +750,6 @@ func resetGlobals() {
globalCtx = context.Background()
globalLogger = slog.Default()
workers = nil
watcherIsEnabled = false
globalMu.Unlock()
}

View File

@@ -618,8 +618,9 @@ func testRequestHeaders(t *testing.T, opts *testOptions) {
}
func TestFailingWorker(t *testing.T) {
t.Cleanup(frankenphp.Shutdown)
err := frankenphp.Init(
frankenphp.WithLogger(slog.New(slog.NewTextHandler(io.Discard, nil))),
frankenphp.WithWorkers("failing worker", "testdata/failing-worker.php", 4, frankenphp.WithWorkerMaxFailures(1)),
frankenphp.WithNumThreads(5),
)

3
go.mod
View File

@@ -1,12 +1,13 @@
module github.com/dunglas/frankenphp
go 1.25.0
go 1.25.4
retract v1.0.0-rc.1 // Human error
require (
github.com/Masterminds/sprig/v3 v3.3.0
github.com/dunglas/mercure v0.21.2
github.com/e-dant/watcher/watcher-go v0.0.0-20251208164151-f88ec3b7e146
github.com/maypok86/otter/v2 v2.2.1
github.com/prometheus/client_golang v1.23.2
github.com/stretchr/testify v1.11.1

2
go.sum
View File

@@ -22,6 +22,8 @@ github.com/dunglas/mercure v0.21.2 h1:qaLTScSwsCHDps++4AeLWrRp3BysdR5EoHBqu7JNha
github.com/dunglas/mercure v0.21.2/go.mod h1:3ElA7VwRI8BHUIAVU8oGlvPaqGwsKU5zZVWFNSFg/+U=
github.com/dunglas/skipfilter v1.0.0 h1:JG9SgGg4n6BlFwuTYzb9RIqjH7PfwszvWehanrYWPF4=
github.com/dunglas/skipfilter v1.0.0/go.mod h1:ryhr8j7CAHSjzeN7wI6YEuwoArQ3OQmRqWWVCEAfb9w=
github.com/e-dant/watcher/watcher-go v0.0.0-20251208164151-f88ec3b7e146 h1:h3vVM6X45PK0mAk8NqiYNQGXTyhvXy1HQ5GhuQN4eeA=
github.com/e-dant/watcher/watcher-go v0.0.0-20251208164151-f88ec3b7e146/go.mod h1:sVUOkwtftoj71nnJRG2S0oWNfXFdKpz/M9vK0z06nmM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=

44
hotreload.go Normal file
View File

@@ -0,0 +1,44 @@
//go:build !nomercure && !nowatcher
package frankenphp
import (
"encoding/json"
"log/slog"
"github.com/dunglas/frankenphp/internal/watcher"
"github.com/dunglas/mercure"
watcherGo "github.com/e-dant/watcher/watcher-go"
)
// WithHotReload sets files to watch for file changes to trigger a hot reload update.
// Whenever a batch of matching filesystem events arrives, the events are
// serialized to JSON and published as a Mercure update on topic, letting
// subscribed clients reload the page (or part of it). Publish errors are
// logged, never fatal.
func WithHotReload(topic string, hub *mercure.Hub, patterns []string) Option {
	return func(o *opt) error {
		o.hotReload = append(o.hotReload, &watcher.PatternGroup{
			Patterns: patterns,
			Callback: func(events []*watcherGo.Event) {
				// Wait for workers to restart before sending the update
				go func() {
					data, err := json.Marshal(events)
					if err != nil {
						if globalLogger.Enabled(globalCtx, slog.LevelError) {
							globalLogger.LogAttrs(globalCtx, slog.LevelError, "error marshaling watcher events", slog.Any("error", err))
						}
						return
					}

					if err := hub.Publish(globalCtx, &mercure.Update{
						Topics: []string{topic},
						Event:  mercure.Event{Data: string(data)},
						// mirror the logger's debug setting so the hub exposes debug info only when wanted
						Debug: globalLogger.Enabled(globalCtx, slog.LevelDebug),
					}); err != nil && globalLogger.Enabled(globalCtx, slog.LevelError) {
						globalLogger.LogAttrs(globalCtx, slog.LevelError, "error publishing hot reloading Mercure update", slog.Any("error", err))
					}
				}()
			},
		})
		return nil
	}
}

View File

@@ -6,6 +6,23 @@ import (
"os"
"path/filepath"
)
var (
	// wd caches the process working directory (symlinks resolved when
	// possible) so FastAbs can join against it without a syscall per call.
	wd string
	// wderr records the os.Getwd failure, if any, for FastAbs to report.
	wderr error
)

// init resolves and canonicalizes the working directory once at startup.
func init() {
	wd, wderr = os.Getwd()
	if wderr != nil {
		return
	}

	// Best-effort canonicalization: on success use the symlink-resolved
	// path; on failure keep the raw path. The original condition was
	// inverted (err != nil), which overwrote wd with the empty string
	// returned by EvalSymlinks whenever resolution failed.
	canonicalWD, err := filepath.EvalSymlinks(wd)
	if err == nil {
		wd = canonicalWD
	}
}
// FastAbs is an optimized version of filepath.Abs for Unix systems,
// since we don't expect the working directory to ever change once
@@ -22,4 +39,3 @@ func FastAbs(path string) (string, error) {
return filepath.Join(wd, path), nil
}
var wd, wderr = os.Getwd()

View File

@@ -96,15 +96,20 @@ func (ts *ThreadState) notifySubscribers(nextState State) {
if len(ts.subscribers) == 0 {
return
}
newSubscribers := []stateSubscriber{}
var newSubscribers []stateSubscriber
// notify subscribers to the state change
for _, sub := range ts.subscribers {
if !slices.Contains(sub.states, nextState) {
newSubscribers = append(newSubscribers, sub)
continue
}
close(sub.ch)
}
ts.subscribers = newSubscribers
}
@@ -131,22 +136,25 @@ func (ts *ThreadState) RequestSafeStateChange(nextState State) bool {
// disallow state changes if shutting down or done
case ShuttingDown, Done, Reserved:
ts.mu.Unlock()
return false
// ready and inactive are safe states to transition from
case Ready, Inactive:
ts.currentState = nextState
ts.notifySubscribers(nextState)
ts.mu.Unlock()
return true
}
ts.mu.Unlock()
// wait for the state to change to a safe state
ts.WaitFor(Ready, Inactive, ShuttingDown)
return ts.RequestSafeStateChange(nextState)
}
// markAsWaiting hints that the thread reached a stable state and is waiting for requests or shutdown
// MarkAsWaiting hints that the thread reached a stable state and is waiting for requests or shutdown
func (ts *ThreadState) MarkAsWaiting(isWaiting bool) {
ts.mu.Lock()
if isWaiting {
@@ -158,15 +166,16 @@ func (ts *ThreadState) MarkAsWaiting(isWaiting bool) {
ts.mu.Unlock()
}
// isWaitingState returns true if a thread is waiting for a request or shutdown
// IsInWaitingState returns true if a thread is waiting for a request or shutdown
func (ts *ThreadState) IsInWaitingState() bool {
ts.mu.RLock()
isWaiting := ts.isWaiting
ts.mu.RUnlock()
return isWaiting
}
// waitTime returns the time since the thread is waiting in a stable state in ms
// WaitTime returns the time since the thread is waiting in a stable state in ms
func (ts *ThreadState) WaitTime() int64 {
ts.mu.RLock()
waitTime := int64(0)
@@ -174,6 +183,7 @@ func (ts *ThreadState) WaitTime() int64 {
waitTime = time.Now().UnixMilli() - ts.waitingSince.UnixMilli()
}
ts.mu.RUnlock()
return waitTime
}

View File

@@ -1,6 +0,0 @@
//go:build !nowatcher
package watcher
// #cgo LDFLAGS: -lwatcher-c -lstdc++
import "C"

218
internal/watcher/pattern.go Normal file
View File

@@ -0,0 +1,218 @@
//go:build !nowatcher
package watcher
import (
"log/slog"
"path/filepath"
"strings"
"github.com/dunglas/frankenphp/internal/fastabs"
"github.com/e-dant/watcher/watcher-go"
)
// pattern is a single watch pattern: a concrete directory to watch plus
// the glob expression matched against event paths. One watcher session
// is started per pattern.
type pattern struct {
	patternGroup *PatternGroup // group whose callback receives this pattern's events
	value        string        // absolute directory to watch (glob part stripped by parse)
	parsedValues []string      // glob split on "**" for piecewise matching
	events       chan eventHolder // sink for events that pass allowReload
	// failureCount is not referenced in this view — presumably tracks
	// consecutive watcher failures for retryWatching; confirm elsewhere.
	failureCount int
	watcher      *watcher.Watcher // underlying e-dant/watcher session
}
// startSession starts watching p.value, routing every raw filesystem
// event through p.handle. The watched path is logged at debug level so
// it is visible during development.
func (p *pattern) startSession() {
	p.watcher = watcher.NewWatcher(p.value, p.handle)
	if globalLogger.Enabled(globalCtx, slog.LevelDebug) {
		globalLogger.LogAttrs(globalCtx, slog.LevelDebug, "watching", slog.String("pattern", p.value))
	}
}
// parse splits the raw pattern (e.g. "/path/**/*.php") into the concrete
// directory to watch (p.value) and the glob pieces to match against
// event paths (p.parsedValues, split on the recursive "**" marker).
func (p *pattern) parse() (err error) {
	// make the pattern absolute so it can be compared to absolute event paths
	absPattern, err := fastabs.FastAbs(p.value)
	if err != nil {
		return err
	}
	p.value = absPattern

	// walk the segments: the directory part ends at the first segment
	// containing a glob character, or at a final segment that looks like a
	// filename (contains a dot)
	splitPattern := strings.Split(absPattern, string(filepath.Separator))
	patternWithoutDir := ""
	for i, part := range splitPattern {
		isFilename := i == len(splitPattern)-1 && strings.Contains(part, ".")
		isGlobCharacter := strings.ContainsAny(part, "[*?{")
		if isFilename || isGlobCharacter {
			patternWithoutDir = filepath.Join(splitPattern[i:]...)
			p.value = filepath.Join(splitPattern[:i]...)
			break
		}
	}

	// split the glob on the recursive '**' marker; pieces are matched separately
	p.parsedValues = strings.Split(patternWithoutDir, "**")
	for i, pp := range p.parsedValues {
		p.parsedValues[i] = strings.Trim(pp, string(filepath.Separator))
	}

	// remove the trailing separator and add leading separator
	p.value = string(filepath.Separator) + strings.Trim(p.value, string(filepath.Separator))

	// canonicalize the directory (best effort: symlink resolution may fail,
	// in which case the non-canonical path is kept)
	canonicalPattern, err := filepath.EvalSymlinks(p.value)
	if err == nil {
		p.value = canonicalPattern
	}

	return nil
}
// allowReload reports whether event should trigger a reload: both the
// effect type and path type must be relevant, and the event's path must
// match this pattern.
func (p *pattern) allowReload(event *watcher.Event) bool {
	if !isValidEventType(event.EffectType) || !isValidPathType(event) {
		return false
	}

	// some editors create temporary files and never actually modify the
	// original file, so Event.AssociatedPathName must also be checked
	// see https://github.com/php/frankenphp/issues/1375
	return p.isValidPattern(event.PathName) || p.isValidPattern(event.AssociatedPathName)
}
// handle is the callback invoked by the watcher for every raw event:
// it recovers from premature watcher death and forwards matching events
// to the pattern group's channel.
func (p *pattern) handle(event *watcher.Event) {
	// If the watcher prematurely sends the die@ event, retry watching
	if event.PathType == watcher.PathTypeWatcher && strings.HasPrefix(event.PathName, "e/self/die@") && watcherIsActive.Load() {
		p.retryWatching()
		return
	}

	if p.allowReload(event) {
		p.events <- eventHolder{p.patternGroup, event}
	}
}
// stop closes this pattern's underlying watcher session.
func (p *pattern) stop() {
	p.watcher.Close()
}
// isValidEventType keeps only effect types up to and including
// EffectTypeDestroy; higher-valued effects (e.g. ownership changes, see
// the tests) are ignored for reload purposes.
func isValidEventType(effectType watcher.EffectType) bool {
	return effectType <= watcher.EffectTypeDestroy
}
// isValidPathType keeps only path types up to and including
// PathTypeHardLink; special watcher-internal events are logged at debug
// level for diagnostics and excluded from reload handling.
func isValidPathType(event *watcher.Event) bool {
	if event.PathType == watcher.PathTypeWatcher && globalLogger.Enabled(globalCtx, slog.LevelDebug) {
		globalLogger.LogAttrs(globalCtx, slog.LevelDebug, "special e-dant/watcher event", slog.Any("event", event))
	}

	return event.PathType <= watcher.PathTypeHardLink
}
// isValidPattern reports whether fileName (an absolute path from the
// watcher) lives under this pattern's directory and matches its glob.
func (p *pattern) isValidPattern(fileName string) bool {
	if fileName == "" {
		return false
	}

	// the file must live under the watched directory
	if !strings.HasPrefix(fileName, p.value) {
		return false
	}

	// remove the directory path and separator from the filename
	fileNameWithoutDir := strings.TrimPrefix(strings.TrimPrefix(fileName, p.value), string(filepath.Separator))

	// without '**' the single glob piece is matched directly against the
	// relative path
	if len(p.parsedValues) == 1 {
		return matchBracketPattern(p.parsedValues[0], fileNameWithoutDir)
	}

	return p.matchPatterns(fileNameWithoutDir)
}
// matchPatterns matches a directory-relative path against the
// '**'-separated glob pieces in order: each piece must match a
// consecutive run of path segments, and the '**' between pieces lets the
// cursor skip any number of segments. The last piece is anchored to the
// end of the path.
func (p *pattern) matchPatterns(fileName string) bool {
	partsToMatch := strings.Split(fileName, string(filepath.Separator))
	cursor := 0

	// if there are multiple parsedValues due to '**' we need to match them individually
	for i, pattern := range p.parsedValues {
		// number of path segments this glob piece spans
		patternSize := strings.Count(pattern, string(filepath.Separator)) + 1

		// if we are at the last pattern we will start matching from the end of the filename
		if i == len(p.parsedValues)-1 {
			cursor = len(partsToMatch) - patternSize
			if cursor < 0 {
				return false
			}
		}

		// the cursor will move through the fileName until the pattern matches
		for j := cursor; j < len(partsToMatch); j++ {
			if j+patternSize > len(partsToMatch) {
				return false
			}
			cursor = j
			subPattern := strings.Join(partsToMatch[j:j+patternSize], string(filepath.Separator))
			if matchBracketPattern(pattern, subPattern) {
				cursor = j + patternSize - 1
				break
			}
			// no room left for this piece further down the path
			if cursor > len(partsToMatch)-patternSize-1 {
				return false
			}
		}
	}

	return true
}
// matchBracketPattern expands the brace alternation syntax
// ("/path/*.{php,twig,yaml}") before delegating to matchPattern; a
// pattern without braces is matched as-is. A single matching alternative
// is enough.
func matchBracketPattern(pattern string, fileName string) bool {
	lb := strings.Index(pattern, "{")
	rb := strings.Index(pattern, "}")

	// no brace group: plain glob match
	if lb == -1 || rb == -1 {
		return matchPattern(pattern, fileName)
	}

	// expand "*.{php,twig}" into "*.php", "*.twig", ... and accept the first hit
	prefix, alternatives, suffix := pattern[:lb], pattern[lb+1:rb], pattern[rb+1:]
	for alternative := range strings.SplitSeq(alternatives, ",") {
		if matchPattern(prefix+alternative+suffix, fileName) {
			return true
		}
	}

	return false
}
// matchPattern reports whether fileName matches the shell pattern.
// An empty pattern matches everything; an invalid pattern is logged
// and treated as a non-match.
func matchPattern(pattern string, fileName string) bool {
	if pattern == "" {
		return true
	}
	matched, err := filepath.Match(pattern, fileName)
	if err == nil {
		return matched
	}
	if globalLogger.Enabled(globalCtx, slog.LevelError) {
		globalLogger.LogAttrs(globalCtx, slog.LevelError, "failed to match filename", slog.String("file", fileName), slog.Any("error", err))
	}
	return false
}

View File

@@ -0,0 +1,350 @@
//go:build !nowatcher
package watcher
import (
"path/filepath"
"testing"
"github.com/e-dant/watcher/watcher-go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Effect types beyond "destroy" (e.g. owner changes) must not trigger a reload.
func TestDisallowOnEventTypeBiggerThan3(t *testing.T) {
	p := pattern{value: "/some/path"}
	require.NoError(t, p.parse())
	event := &watcher.Event{PathName: "/some/path/watch-me.php", EffectType: watcher.EffectTypeOwner}
	assert.False(t, p.allowReload(event))
}
// Path types beyond "hard link" (e.g. symlinks) must not trigger a reload.
func TestDisallowOnPathTypeBiggerThan2(t *testing.T) {
	p := pattern{value: "/some/path"}
	require.NoError(t, p.parse())
	event := &watcher.Event{PathName: "/some/path/watch-me.php", PathType: watcher.PathTypeSymLink}
	assert.False(t, p.allowReload(event))
}
// Each pattern must resolve to the directory that is actually watched on disk.
func TestWatchesCorrectDir(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/path", "/path"},
		{"/path/", "/path"},
		{"/path/**/*.php", "/path"},
		{"/path/*.php", "/path"},
		{"/path/*/*.php", "/path"},
		{"/path/?path/*.php", "/path"},
		{"/path/{dir1,dir2}/**/*.php", "/path"},
		{".", relativeDir(t, "")},
		{"./", relativeDir(t, "")},
		{"./**", relativeDir(t, "")},
		{"..", relativeDir(t, "/..")},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			hasDir(t, tc.pattern, tc.dir)
		})
	}
}
// Bare directory patterns (with or without '**') match files at any depth below them.
func TestValidRecursiveDirectories(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/path", "/path/file.php"},
		{"/path", "/path/subpath/file.php"},
		{"/path/", "/path/subpath/file.php"},
		{"/path**", "/path/subpath/file.php"},
		{"/path/**", "/path/subpath/file.php"},
		{"/path/**/", "/path/subpath/file.php"},
		{".", relativeDir(t, "file.php")},
		{".", relativeDir(t, "subpath/file.php")},
		{"./**", relativeDir(t, "subpath/file.php")},
		{"..", relativeDir(t, "subpath/file.php")},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			shouldMatch(t, tc.pattern, tc.dir)
		})
	}
}
// Files outside the watched directory never match.
func TestInvalidRecursiveDirectories(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/path", "/other/file.php"},
		{"/path/**", "/other/file.php"},
		{".", "/other/file.php"},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			shouldNotMatch(t, tc.pattern, tc.dir)
		})
	}
}
// Single-level file patterns ('*', '?', literal names) match files directly in the dir.
func TestValidNonRecursiveFilePatterns(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/*.php", "/file.php"},
		{"/path/*.php", "/path/file.php"},
		{"/path/?ile.php", "/path/file.php"},
		{"/path/file.php", "/path/file.php"},
		{"*.php", relativeDir(t, "file.php")},
		{"./*.php", relativeDir(t, "file.php")},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			shouldMatch(t, tc.pattern, tc.dir)
		})
	}
}
// Single-level patterns must not match wrong extensions or deeper paths.
func TestInValidNonRecursiveFilePatterns(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/path/*.txt", "/path/file.php"},
		{"/path/*.php", "/path/subpath/file.php"},
		{"/*.php", "/path/file.php"},
		{"*.txt", relativeDir(t, "file.php")},
		{"*.php", relativeDir(t, "subpath/file.php")},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			shouldNotMatch(t, tc.pattern, tc.dir)
		})
	}
}
// '**' patterns match the file pattern at any depth, including depth zero.
func TestValidRecursiveFilePatterns(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/path/**/*.php", "/path/file.php"},
		{"/path/**/*.php", "/path/subpath/file.php"},
		{"/path/**/?ile.php", "/path/subpath/file.php"},
		{"/path/**/file.php", "/path/subpath/file.php"},
		{"**/*.php", relativeDir(t, "file.php")},
		{"**/*.php", relativeDir(t, "subpath/file.php")},
		{"./**/*.php", relativeDir(t, "subpath/file.php")},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			shouldMatch(t, tc.pattern, tc.dir)
		})
	}
}
// '**' patterns still reject wrong extensions, wrong roots, and paths too
// short for the fixed segments of the pattern (regression cases for the
// bounds checks in matchPatterns).
func TestInvalidRecursiveFilePatterns(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/path/**/*.txt", "/path/file.php"},
		{"/path/**/*.txt", "/other/file.php"},
		{"/path/**/*.txt", "/path/subpath/file.php"},
		{"/path/**/?ilm.php", "/path/subpath/file.php"},
		{"**/*.php", "/other/file.php"},
		{".**/*.php", "/other/file.php"},
		{"./**/*.php", "/other/file.php"},
		{"/a/**/very/long/path.php", "/a/short.php"},
		{"", ""},
		{"/a/**/b/c/d/**/e.php", "/a/x/e.php"},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			shouldNotMatch(t, tc.pattern, tc.dir)
		})
	}
}
// Patterns with intermediate directory wildcards and fixed segments ('vendor') match.
func TestValidDirectoryPatterns(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/path/*/*.php", "/path/subpath/file.php"},
		{"/path/*/*/*.php", "/path/subpath/subpath/file.php"},
		{"/path/?/*.php", "/path/1/file.php"},
		{"/path/**/vendor/*.php", "/path/vendor/file.php"},
		{"/path/**/vendor/*.php", "/path/subpath/vendor/file.php"},
		{"/path/**/vendor/**/*.php", "/path/vendor/file.php"},
		{"/path/**/vendor/**/*.php", "/path/subpath/subpath/vendor/subpath/subpath/file.php"},
		{"/path/**/vendor/*/*.php", "/path/subpath/subpath/vendor/subpath/file.php"},
		{"/path*/path*/*", "/path1/path2/file.php"},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			shouldMatch(t, tc.pattern, tc.dir)
		})
	}
}
// Directory wildcards must not match too few or too many path levels.
func TestInvalidDirectoryPatterns(t *testing.T) {
	t.Parallel()
	cases := []struct {
		pattern string
		dir     string
	}{
		{"/path/subpath/*.php", "/path/other/file.php"},
		{"/path/*/*.php", "/path/subpath/subpath/file.php"},
		{"/path/?/*.php", "/path/subpath/file.php"},
		{"/path/*/*/*.php", "/path/subpath/file.php"},
		{"/path/*/*/*.php", "/path/subpath/subpath/subpath/file.php"},
		{"/path/**/vendor/*.php", "/path/subpath/vendor/subpath/file.php"},
		{"/path/**/vendor/*.php", "/path/subpath/file.php"},
		{"/path/**/vendor/**/*.php", "/path/subpath/file.php"},
		{"/path/**/vendor/**/*.txt", "/path/subpath/vendor/subpath/file.php"},
		{"/path/**/vendor/**/*.php", "/path/subpath/subpath/subpath/file.php"},
		{"/path/**/vendor/*/*.php", "/path/subpath/vendor/subpath/subpath/file.php"},
		{"/path*/path*", "/path1/path1/file.php"},
	}
	for _, tc := range cases {
		t.Run(tc.pattern, func(t *testing.T) {
			t.Parallel()
			shouldNotMatch(t, tc.pattern, tc.dir)
		})
	}
}
// Bracket alternation patterns like *.{php,twig} must match each alternative.
func TestValidExtendedPatterns(t *testing.T) {
	// consistent with the sibling pattern tests; the subtests already opt in
	t.Parallel()
	data := []struct {
		pattern string
		dir     string
	}{
		{"/path/*.{php}", "/path/file.php"},
		{"/path/*.{php,twig}", "/path/file.php"},
		{"/path/*.{php,twig}", "/path/file.twig"},
		{"/path/**/{file.php,file.twig}", "/path/subpath/file.twig"},
		{"/path/{dir1,dir2}/file.php", "/path/dir1/file.php"},
		{"/path/{dir1,dir2}/file.php", "/path/dir2/file.php"},
		{"/app/{app,config,resources}/**/*.php", "/app/app/subpath/file.php"},
		{"/app/{app,config,resources}/**/*.php", "/app/config/subpath/file.php"},
	}
	for _, d := range data {
		t.Run(d.pattern, func(t *testing.T) {
			t.Parallel()
			shouldMatch(t, d.pattern, d.dir)
		})
	}
}
// No bracket alternative may match a file that fits none of them.
func TestInvalidExtendedPatterns(t *testing.T) {
	// consistent with the sibling pattern tests; the subtests already opt in
	t.Parallel()
	data := []struct {
		pattern string
		dir     string
	}{
		{"/path/*.{php}", "/path/file.txt"},
		{"/path/*.{php,twig}", "/path/file.txt"},
		{"/path/{file.php,file.twig}", "/path/file.txt"},
		{"/path/{dir1,dir2}/file.php", "/path/dir3/file.php"},
		{"/path/{dir1,dir2}/**/*.php", "/path/dir1/subpath/file.txt"},
	}
	for _, d := range data {
		t.Run(d.pattern, func(t *testing.T) {
			t.Parallel()
			shouldNotMatch(t, d.pattern, d.dir)
		})
	}
}
// Some editors write to a temporary file and rename it over the original, so
// the watcher must also consider an event's associated path when deciding to
// reload (see https://github.com/php/frankenphp/issues/1375).
func TestAnAssociatedEventTriggersTheWatcher(t *testing.T) {
	w := pattern{value: "/**/*.php"}
	require.NoError(t, w.parse())
	w.events = make(chan eventHolder)
	// the event's own path does not match the pattern, but the associated path does
	e := &watcher.Event{PathName: "/path/temporary_file", AssociatedPathName: "/path/file.php"}
	go w.handle(e)
	// handle must forward the event on the events channel
	assert.Equal(t, e, (<-w.events).event)
}
func relativeDir(t *testing.T, relativePath string) string {
dir, err := filepath.Abs("./" + relativePath)
assert.NoError(t, err)
return dir
}
// hasDir asserts that parsing pattern p leaves dir as the watched directory.
func hasDir(t *testing.T, p string, dir string) {
	t.Helper()
	pat := pattern{value: p}
	require.NoError(t, pat.parse())
	assert.Equal(t, dir, pat.value)
}
// shouldMatch asserts that an event for fileName triggers a reload under pattern p.
func shouldMatch(t *testing.T, p string, fileName string) {
	t.Helper()
	pat := pattern{value: p}
	require.NoError(t, pat.parse())
	assert.True(t, pat.allowReload(&watcher.Event{PathName: fileName}))
}
// shouldNotMatch asserts that an event for fileName does NOT trigger a reload under pattern p.
func shouldNotMatch(t *testing.T, p string, fileName string) {
	t.Helper()
	pat := pattern{value: p}
	require.NoError(t, pat.parse())
	assert.False(t, pat.allowReload(&watcher.Event{PathName: fileName}))
}

View File

@@ -1,178 +0,0 @@
//go:build !nowatcher
package watcher
import (
"log/slog"
"path/filepath"
"strings"
"github.com/dunglas/frankenphp/internal/fastabs"
)
// watchPattern describes a single parsed watch target: the directory handed
// to the underlying watcher plus the glob patterns (split on '**') used to
// filter events coming from that directory.
type watchPattern struct {
	dir          string      // absolute directory to watch
	patterns     []string    // sub-patterns obtained by splitting on the recursive '**'
	trigger      chan string // receives paths of files that should trigger a reload
	failureCount int         // consecutive premature-close retries (capped by maxFailureCount)
}
// parseFilePatterns parses every configured file pattern into a watch target,
// failing on the first invalid pattern.
func parseFilePatterns(filePatterns []string) ([]*watchPattern, error) {
	result := make([]*watchPattern, 0, len(filePatterns))
	for _, fp := range filePatterns {
		parsed, err := parseFilePattern(fp)
		if err != nil {
			return nil, err
		}
		result = append(result, parsed)
	}
	return result, nil
}
// this method prepares the watchPattern struct for a single file pattern (aka /path/*pattern):
// it resolves the pattern to an absolute path, splits it into the directory to
// watch and the glob part, and pre-splits the glob on the recursive '**' separator
func parseFilePattern(filePattern string) (*watchPattern, error) {
	w := &watchPattern{}
	// first we clean the pattern
	absPattern, err := fastabs.FastAbs(filePattern)
	if err != nil {
		return nil, err
	}
	w.dir = absPattern
	// then we split the pattern to determine where the directory ends and the pattern starts
	splitPattern := strings.Split(absPattern, string(filepath.Separator))
	patternWithoutDir := ""
	for i, part := range splitPattern {
		// the first segment containing a glob character — or a final segment that
		// looks like a filename (contains a dot) — starts the pattern part
		isFilename := i == len(splitPattern)-1 && strings.Contains(part, ".")
		isGlobCharacter := strings.ContainsAny(part, "[*?{")
		if isFilename || isGlobCharacter {
			patternWithoutDir = filepath.Join(splitPattern[i:]...)
			w.dir = filepath.Join(splitPattern[:i]...)
			break
		}
	}
	// now we split the pattern according to the recursive '**' syntax
	w.patterns = strings.Split(patternWithoutDir, "**")
	for i, pattern := range w.patterns {
		w.patterns[i] = strings.Trim(pattern, string(filepath.Separator))
	}
	// finally, we remove the trailing separator and add leading separator
	w.dir = string(filepath.Separator) + strings.Trim(w.dir, string(filepath.Separator))
	return w, nil
}
// allowReload reports whether an event of the given effect/path type for
// fileName should trigger a reload for this watch pattern.
func (w *watchPattern) allowReload(fileName string, eventType int, pathType int) bool {
	if !isValidEventType(eventType) || !isValidPathType(pathType, fileName) {
		return false
	}
	return isValidPattern(fileName, w.dir, w.patterns)
}
// isValidEventType keeps rename(0), modify(1), create(2) and destroy(3);
// owner(4) and other(5) changes are ignored.
func isValidEventType(eventType int) bool {
	return eventType < 4
}
// isValidPathType keeps dir(0), file(1) and hard_link(2) events; special
// watcher(4) events are logged at debug level, and sym_link(3)/other(5)
// events are ignored.
func isValidPathType(pathType int, fileName string) bool {
	if pathType == 4 && logger.Enabled(ctx, slog.LevelDebug) {
		logger.LogAttrs(ctx, slog.LevelDebug, "special edant/watcher event", slog.String("fileName", fileName))
	}
	return pathType < 3
}
// isValidPattern reports whether fileName, which must live under dir,
// matches the '**'-split patterns.
func isValidPattern(fileName string, dir string, patterns []string) bool {
	if !strings.HasPrefix(fileName, dir) {
		return false
	}
	// strip the watched directory and its trailing separator from the filename
	relative := strings.TrimPrefix(strings.TrimPrefix(fileName, dir), string(filepath.Separator))
	// a single pattern (no '**') can be matched directly against the filename
	if len(patterns) == 1 {
		return matchBracketPattern(patterns[0], relative)
	}
	return matchPatterns(patterns, relative)
}
// matchPatterns matches a filename, already relative to the watched dir,
// against patterns that were split on the recursive '**' separator.
func matchPatterns(patterns []string, fileName string) bool {
	partsToMatch := strings.Split(fileName, string(filepath.Separator))
	cursor := 0
	// if there are multiple patterns due to '**' we need to match them individually
	for i, pattern := range patterns {
		// number of path segments this sub-pattern spans
		patternSize := strings.Count(pattern, string(filepath.Separator)) + 1
		// if we are at the last pattern we will start matching from the end of the filename
		if i == len(patterns)-1 {
			cursor = len(partsToMatch) - patternSize
			// the filename has fewer segments than the final pattern requires;
			// without this check a negative cursor caused a slice-range panic below
			if cursor < 0 {
				return false
			}
		}
		// the cursor will move through the fileName until the pattern matches
		for j := cursor; j < len(partsToMatch); j++ {
			// not enough segments left to fit this sub-pattern
			// (prevents an out-of-range slice in the Join below)
			if j+patternSize > len(partsToMatch) {
				return false
			}
			cursor = j
			subPattern := strings.Join(partsToMatch[j:j+patternSize], string(filepath.Separator))
			if matchBracketPattern(pattern, subPattern) {
				cursor = j + patternSize - 1
				break
			}
			if cursor > len(partsToMatch)-patternSize-1 {
				return false
			}
		}
	}
	return true
}
// matchBracketPattern expands the alternation syntax /path/*.{php,twig,yaml}
// into one pattern per alternative (*.php, *.twig, *.yaml) and reports whether
// fileName matches at least one of them. Patterns without a complete bracket
// pair are forwarded to matchPattern unchanged.
func matchBracketPattern(pattern string, fileName string) bool {
	openingBracket := strings.Index(pattern, "{")
	// look for the closing bracket only AFTER the opening one: using the first
	// '}' anywhere meant a pattern like "x}y{z}" produced a reversed slice
	// range and panicked at runtime
	closingBracket := -1
	if openingBracket != -1 {
		if rel := strings.Index(pattern[openingBracket:], "}"); rel != -1 {
			closingBracket = openingBracket + rel
		}
	}
	// if there are no (complete) brackets we can match regularly
	if openingBracket == -1 || closingBracket == -1 {
		return matchPattern(pattern, fileName)
	}
	beforeTheBrackets := pattern[:openingBracket]
	betweenTheBrackets := pattern[openingBracket+1 : closingBracket]
	afterTheBrackets := pattern[closingBracket+1:]
	// all bracket entries are checked individually, only one needs to match
	// *.{php,twig,yaml} -> *.php, *.twig, *.yaml
	for pattern := range strings.SplitSeq(betweenTheBrackets, ",") {
		if matchPattern(beforeTheBrackets+pattern+afterTheBrackets, fileName) {
			return true
		}
	}
	return false
}
// matchPattern reports whether fileName matches the shell pattern; an empty
// pattern matches everything and an invalid pattern is logged and rejected.
func matchPattern(pattern string, fileName string) bool {
	if pattern == "" {
		return true
	}
	matched, err := filepath.Match(pattern, fileName)
	if err == nil {
		return matched
	}
	if logger.Enabled(ctx, slog.LevelError) {
		logger.LogAttrs(ctx, slog.LevelError, "failed to match filename", slog.String("file", fileName), slog.Any("error", err))
	}
	return false
}

View File

@@ -1,187 +0,0 @@
//go:build !nowatcher
package watcher
import (
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// Event types above 3 (owner/other changes) must never trigger a reload.
func TestDisallowOnEventTypeBiggerThan3(t *testing.T) {
	const fileName = "/some/path/watch-me.php"
	const eventType = 4 // 4 = owner change
	watchPattern, err := parseFilePattern("/some/path")
	assert.NoError(t, err)
	assert.False(t, watchPattern.allowReload(fileName, eventType, 0))
}
// Path types above 2 (symlinks and special events) must never trigger a reload.
func TestDisallowOnPathTypeBiggerThan2(t *testing.T) {
	const fileName = "/some/path/watch-me.php"
	const pathType = 3 // 3 = symlink
	watchPattern, err := parseFilePattern("/some/path")
	assert.NoError(t, err)
	assert.False(t, watchPattern.allowReload(fileName, 0, pathType))
}
// Each pattern must resolve to the directory that is actually watched on disk.
func TestWatchesCorrectDir(t *testing.T) {
	hasDir(t, "/path", "/path")
	hasDir(t, "/path/", "/path")
	hasDir(t, "/path/**/*.php", "/path")
	hasDir(t, "/path/*.php", "/path")
	hasDir(t, "/path/*/*.php", "/path")
	hasDir(t, "/path/?dir/*.php", "/path")
	hasDir(t, "/path/{dir1,dir2}/**/*.php", "/path")
	// relative patterns resolve against the current working directory
	hasDir(t, ".", relativeDir(t, ""))
	hasDir(t, "./", relativeDir(t, ""))
	hasDir(t, "./**", relativeDir(t, ""))
	hasDir(t, "..", relativeDir(t, "/.."))
}
// Bare directory patterns (with or without '**') match files at any depth below them.
func TestValidRecursiveDirectories(t *testing.T) {
	shouldMatch(t, "/path", "/path/file.php")
	shouldMatch(t, "/path", "/path/subpath/file.php")
	shouldMatch(t, "/path/", "/path/subpath/file.php")
	shouldMatch(t, "/path**", "/path/subpath/file.php")
	shouldMatch(t, "/path/**", "/path/subpath/file.php")
	shouldMatch(t, "/path/**/", "/path/subpath/file.php")
	shouldMatch(t, ".", relativeDir(t, "file.php"))
	shouldMatch(t, ".", relativeDir(t, "subpath/file.php"))
	shouldMatch(t, "./**", relativeDir(t, "subpath/file.php"))
	shouldMatch(t, "..", relativeDir(t, "subpath/file.php"))
}
// Files outside the watched directory never match.
func TestInvalidRecursiveDirectories(t *testing.T) {
	shouldNotMatch(t, "/path", "/other/file.php")
	shouldNotMatch(t, "/path/**", "/other/file.php")
	shouldNotMatch(t, ".", "/other/file.php")
}
// Single-level file patterns ('*', '?', literal names) match files directly in the dir.
func TestValidNonRecursiveFilePatterns(t *testing.T) {
	shouldMatch(t, "/*.php", "/file.php")
	shouldMatch(t, "/path/*.php", "/path/file.php")
	shouldMatch(t, "/path/?ile.php", "/path/file.php")
	shouldMatch(t, "/path/file.php", "/path/file.php")
	shouldMatch(t, "*.php", relativeDir(t, "file.php"))
	shouldMatch(t, "./*.php", relativeDir(t, "file.php"))
}
// Single-level patterns must not match wrong extensions or deeper paths.
func TestInValidNonRecursiveFilePatterns(t *testing.T) {
	shouldNotMatch(t, "/path/*.txt", "/path/file.php")
	shouldNotMatch(t, "/path/*.php", "/path/subpath/file.php")
	shouldNotMatch(t, "/*.php", "/path/file.php")
	shouldNotMatch(t, "*.txt", relativeDir(t, "file.php"))
	shouldNotMatch(t, "*.php", relativeDir(t, "subpath/file.php"))
}
// '**' patterns match the file pattern at any depth, including depth zero.
func TestValidRecursiveFilePatterns(t *testing.T) {
	shouldMatch(t, "/path/**/*.php", "/path/file.php")
	shouldMatch(t, "/path/**/*.php", "/path/subpath/file.php")
	shouldMatch(t, "/path/**/?ile.php", "/path/subpath/file.php")
	shouldMatch(t, "/path/**/file.php", "/path/subpath/file.php")
	shouldMatch(t, "**/*.php", relativeDir(t, "file.php"))
	shouldMatch(t, "**/*.php", relativeDir(t, "subpath/file.php"))
	shouldMatch(t, "./**/*.php", relativeDir(t, "subpath/file.php"))
}
// '**' patterns still reject wrong extensions and files outside the watched root.
func TestInvalidRecursiveFilePatterns(t *testing.T) {
	shouldNotMatch(t, "/path/**/*.txt", "/path/file.php")
	shouldNotMatch(t, "/path/**/*.txt", "/other/file.php")
	shouldNotMatch(t, "/path/**/*.txt", "/path/subpath/file.php")
	shouldNotMatch(t, "/path/**/?ilm.php", "/path/subpath/file.php")
	shouldNotMatch(t, "**/*.php", "/other/file.php")
	shouldNotMatch(t, ".**/*.php", "/other/file.php")
	shouldNotMatch(t, "./**/*.php", "/other/file.php")
}
// Patterns with intermediate directory wildcards and fixed segments ('vendor') match.
func TestValidDirectoryPatterns(t *testing.T) {
	shouldMatch(t, "/path/*/*.php", "/path/subpath/file.php")
	shouldMatch(t, "/path/*/*/*.php", "/path/subpath/subpath/file.php")
	shouldMatch(t, "/path/?/*.php", "/path/1/file.php")
	shouldMatch(t, "/path/**/vendor/*.php", "/path/vendor/file.php")
	shouldMatch(t, "/path/**/vendor/*.php", "/path/subpath/vendor/file.php")
	shouldMatch(t, "/path/**/vendor/**/*.php", "/path/vendor/file.php")
	shouldMatch(t, "/path/**/vendor/**/*.php", "/path/subpath/subpath/vendor/subpath/subpath/file.php")
	shouldMatch(t, "/path/**/vendor/*/*.php", "/path/subpath/subpath/vendor/subpath/file.php")
	shouldMatch(t, "/path*/path*/*", "/path1/path2/file.php")
}
// Directory wildcards must not match too few or too many path levels.
func TestInvalidDirectoryPatterns(t *testing.T) {
	shouldNotMatch(t, "/path/subpath/*.php", "/path/other/file.php")
	shouldNotMatch(t, "/path/*/*.php", "/path/subpath/subpath/file.php")
	shouldNotMatch(t, "/path/?/*.php", "/path/subpath/file.php")
	shouldNotMatch(t, "/path/*/*/*.php", "/path/subpath/file.php")
	shouldNotMatch(t, "/path/*/*/*.php", "/path/subpath/subpath/subpath/file.php")
	shouldNotMatch(t, "/path/**/vendor/*.php", "/path/subpath/vendor/subpath/file.php")
	shouldNotMatch(t, "/path/**/vendor/*.php", "/path/subpath/file.php")
	shouldNotMatch(t, "/path/**/vendor/**/*.php", "/path/subpath/file.php")
	shouldNotMatch(t, "/path/**/vendor/**/*.txt", "/path/subpath/vendor/subpath/file.php")
	shouldNotMatch(t, "/path/**/vendor/**/*.php", "/path/subpath/subpath/subpath/file.php")
	shouldNotMatch(t, "/path/**/vendor/*/*.php", "/path/subpath/vendor/subpath/subpath/file.php")
	shouldNotMatch(t, "/path*/path*", "/path1/path1/file.php")
}
// Bracket alternation patterns like *.{php,twig} must match each alternative.
func TestValidExtendedPatterns(t *testing.T) {
	shouldMatch(t, "/path/*.{php}", "/path/file.php")
	shouldMatch(t, "/path/*.{php,twig}", "/path/file.php")
	shouldMatch(t, "/path/*.{php,twig}", "/path/file.twig")
	shouldMatch(t, "/path/**/{file.php,file.twig}", "/path/subpath/file.twig")
	shouldMatch(t, "/path/{dir1,dir2}/file.php", "/path/dir1/file.php")
	shouldMatch(t, "/path/{dir1,dir2}/file.php", "/path/dir2/file.php")
	shouldMatch(t, "/app/{app,config,resources}/**/*.php", "/app/app/subpath/file.php")
	shouldMatch(t, "/app/{app,config,resources}/**/*.php", "/app/config/subpath/file.php")
}
// No bracket alternative may match a file that fits none of them.
func TestInValidExtendedPatterns(t *testing.T) {
	shouldNotMatch(t, "/path/*.{php}", "/path/file.txt")
	shouldNotMatch(t, "/path/*.{php,twig}", "/path/file.txt")
	shouldNotMatch(t, "/path/{file.php,file.twig}", "/path/file.txt")
	shouldNotMatch(t, "/path/{dir1,dir2}/file.php", "/path/dir3/file.php")
	shouldNotMatch(t, "/path/{dir1,dir2}/**/*.php", "/path/dir1/subpath/file.txt")
}
// Editors often write to a temp file and rename it over the original, so the
// associated path of an event must also be able to trigger the watcher
// (see https://github.com/php/frankenphp/issues/1375).
func TestAnAssociatedEventTriggersTheWatcher(t *testing.T) {
	watchPattern, err := parseFilePattern("/**/*.php")
	assert.NoError(t, err)
	watchPattern.trigger = make(chan string)
	// the event's own path does not match the pattern, but the associated path does
	go handleWatcherEvent(watchPattern, "/path/temorary_file", "/path/file.php", 0, 0)
	var path string
	select {
	case path = <-watchPattern.trigger:
		assert.Equal(t, "/path/file.php", path, "should be associated file path")
	case <-time.After(2 * time.Second):
		assert.Fail(t, "associated watchPattern did not trigger after 2s")
	}
}
// relativeDir resolves relativePath against the current working directory,
// mirroring how relative watch patterns are resolved at runtime.
func relativeDir(t *testing.T, relativePath string) string {
	dir, err := filepath.Abs("./" + relativePath)
	assert.NoError(t, err)
	return dir
}
// hasDir asserts that parsing pattern leaves dir as the watched directory.
func hasDir(t *testing.T, pattern string, dir string) {
	watchPattern, err := parseFilePattern(pattern)
	assert.NoError(t, err)
	assert.Equal(t, dir, watchPattern.dir)
}
// shouldMatch asserts that an event for fileName triggers a reload under pattern.
func shouldMatch(t *testing.T, pattern string, fileName string) {
	watchPattern, err := parseFilePattern(pattern)
	assert.NoError(t, err)
	assert.True(t, watchPattern.allowReload(fileName, 0, 0))
}
// shouldNotMatch asserts that an event for fileName does NOT trigger a reload under pattern.
func shouldNotMatch(t *testing.T, pattern string, fileName string) {
	watchPattern, err := parseFilePattern(pattern)
	assert.NoError(t, err)
	assert.False(t, watchPattern.allowReload(fileName, 0, 0))
}

View File

@@ -1,17 +0,0 @@
//go:build nowatcher
package watcher
import (
"context"
"log/slog"
)
// InitWatcher is the no-op fallback used when the binary is built with the
// "nowatcher" tag: it only logs that watcher support is disabled and
// deliberately returns nil so the server still starts.
func InitWatcher(ct context.Context, filePatterns []string, callback func(), logger *slog.Logger) error {
	logger.Error("watcher support is not enabled")
	return nil
}
// DrainWatcher is a no-op in "nowatcher" builds.
func DrainWatcher() {
}

View File

@@ -1,26 +0,0 @@
// clang-format off
//go:build !nowatcher
// clang-format on
#include "_cgo_export.h"
#include "wtr/watcher-c.h"
// Bridge from the C watcher callback into Go: forwards the event's paths and
// type information to go_handle_file_watcher_event, passing the opaque
// handle (data) through so the Go side can recover its watch pattern.
void handle_event(struct wtr_watcher_event event, void *data) {
  go_handle_file_watcher_event(
      (char *)event.path_name, (char *)event.associated_path_name,
      event.effect_type, event.path_type, (uintptr_t)data);
}
// Opens a watcher session on path; data is passed back to every event
// callback. Returns an opaque non-zero session handle on success, 0 on
// failure.
uintptr_t start_new_watcher(char const *const path, uintptr_t data) {
  void *watcher = wtr_watcher_open(path, handle_event, (void *)data);
  if (watcher == NULL) {
    return 0;
  }
  return (uintptr_t)watcher;
}
// Closes a watcher session previously returned by start_new_watcher.
// Returns 1 on success, 0 on failure.
int stop_watcher(uintptr_t watcher) {
  return wtr_watcher_close((void *)watcher) ? 1 : 0;
}

View File

@@ -2,55 +2,60 @@
package watcher
// #include <stdint.h>
// #include <stdlib.h>
// #include "watcher.h"
import "C"
import (
"context"
"errors"
"log/slog"
"runtime/cgo"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/e-dant/watcher/watcher-go"
)
type watcher struct {
sessions []C.uintptr_t
callback func()
trigger chan string
stop chan struct{}
}
// duration to wait before triggering a reload after a file change
const debounceDuration = 150 * time.Millisecond
// times to retry watching if the watcher was closed prematurely
const maxFailureCount = 5
const failureResetDuration = 5 * time.Second
var failureMu = sync.Mutex{}
var watcherIsActive = atomic.Bool{}
const (
// duration to wait before triggering a reload after a file change
debounceDuration = 150 * time.Millisecond
// times to retry watching if the watcher was closed prematurely
maxFailureCount = 5
failureResetDuration = 5 * time.Second
)
var (
ErrAlreadyStarted = errors.New("the watcher is already running")
ErrUnableToStartWatching = errors.New("unable to start the watcher")
ErrAlreadyStarted = errors.New("watcher is already running")
failureMu sync.Mutex
watcherIsActive atomic.Bool
// the currently active file watcher
activeWatcher *watcher
activeWatcher *globalWatcher
// after stopping the watcher we will wait for eventual reloads to finish
reloadWaitGroup sync.WaitGroup
// we are passing the context from the main package to the watcher
ctx context.Context
// we are passing the logger from the main package to the watcher
logger *slog.Logger
globalCtx context.Context
// we are passing the globalLogger from the main package to the watcher
globalLogger *slog.Logger
)
func InitWatcher(ct context.Context, filePatterns []string, callback func(), slogger *slog.Logger) error {
if len(filePatterns) == 0 {
type PatternGroup struct {
Patterns []string
Callback func([]*watcher.Event)
}
type eventHolder struct {
patternGroup *PatternGroup
event *watcher.Event
}
type globalWatcher struct {
groups []*PatternGroup
watchers []*pattern
events chan eventHolder
stop chan struct{}
}
func InitWatcher(ct context.Context, slogger *slog.Logger, groups []*PatternGroup) error {
if len(groups) == 0 {
return nil
}
@@ -59,11 +64,22 @@ func InitWatcher(ct context.Context, filePatterns []string, callback func(), slo
}
watcherIsActive.Store(true)
ctx = ct
logger = slogger
activeWatcher = &watcher{callback: callback}
globalCtx = ct
globalLogger = slogger
if err := activeWatcher.startWatching(ctx, filePatterns); err != nil {
activeWatcher = &globalWatcher{groups: groups}
for _, g := range groups {
if len(g.Patterns) == 0 {
continue
}
for _, p := range g.Patterns {
activeWatcher.watchers = append(activeWatcher.watchers, &pattern{patternGroup: g, value: p})
}
}
if err := activeWatcher.startWatching(); err != nil {
return err
}
@@ -77,8 +93,8 @@ func DrainWatcher() {
watcherIsActive.Store(false)
if logger.Enabled(ctx, slog.LevelDebug) {
logger.LogAttrs(ctx, slog.LevelDebug, "stopping watcher")
if globalLogger.Enabled(globalCtx, slog.LevelDebug) {
globalLogger.LogAttrs(globalCtx, slog.LevelDebug, "stopping watcher")
}
activeWatcher.stopWatching()
@@ -87,142 +103,119 @@ func DrainWatcher() {
}
// TODO: how to test this?
func retryWatching(watchPattern *watchPattern) {
func (p *pattern) retryWatching() {
failureMu.Lock()
defer failureMu.Unlock()
if watchPattern.failureCount >= maxFailureCount {
if logger.Enabled(ctx, slog.LevelWarn) {
logger.LogAttrs(ctx, slog.LevelWarn, "giving up watching", slog.String("dir", watchPattern.dir))
if p.failureCount >= maxFailureCount {
if globalLogger.Enabled(globalCtx, slog.LevelWarn) {
globalLogger.LogAttrs(globalCtx, slog.LevelWarn, "giving up watching", slog.String("pattern", p.value))
}
return
}
if logger.Enabled(ctx, slog.LevelInfo) {
logger.LogAttrs(ctx, slog.LevelInfo, "watcher was closed prematurely, retrying...", slog.String("dir", watchPattern.dir))
if globalLogger.Enabled(globalCtx, slog.LevelInfo) {
globalLogger.LogAttrs(globalCtx, slog.LevelInfo, "watcher was closed prematurely, retrying...", slog.String("pattern", p.value))
}
watchPattern.failureCount++
session, err := startSession(watchPattern)
if err != nil {
activeWatcher.sessions = append(activeWatcher.sessions, session)
}
p.failureCount++
p.startSession()
// reset the failure-count if the watcher hasn't reached max failures after 5 seconds
go func() {
time.Sleep(failureResetDuration * time.Second)
time.Sleep(failureResetDuration)
failureMu.Lock()
if watchPattern.failureCount < maxFailureCount {
watchPattern.failureCount = 0
if p.failureCount < maxFailureCount {
p.failureCount = 0
}
failureMu.Unlock()
}()
}
func (w *watcher) startWatching(ctx context.Context, filePatterns []string) error {
w.trigger = make(chan string)
w.stop = make(chan struct{})
w.sessions = make([]C.uintptr_t, len(filePatterns))
watchPatterns, err := parseFilePatterns(filePatterns)
if err != nil {
func (g *globalWatcher) startWatching() error {
g.events = make(chan eventHolder)
g.stop = make(chan struct{})
if err := g.parseFilePatterns(); err != nil {
return err
}
for i, watchPattern := range watchPatterns {
watchPattern.trigger = w.trigger
session, err := startSession(watchPattern)
if err != nil {
return err
}
w.sessions[i] = session
for _, w := range g.watchers {
w.events = g.events
w.startSession()
}
go listenForFileEvents(w.trigger, w.stop)
go g.listenForFileEvents()
return nil
}
func (w *watcher) stopWatching() {
close(w.stop)
for _, session := range w.sessions {
stopSession(session)
}
}
func startSession(w *watchPattern) (C.uintptr_t, error) {
handle := cgo.NewHandle(w)
cDir := C.CString(w.dir)
defer C.free(unsafe.Pointer(cDir))
watchSession := C.start_new_watcher(cDir, C.uintptr_t(handle))
if watchSession != 0 {
if logger.Enabled(ctx, slog.LevelDebug) {
logger.LogAttrs(ctx, slog.LevelDebug, "watching", slog.String("dir", w.dir), slog.Any("patterns", w.patterns))
func (g *globalWatcher) parseFilePatterns() error {
for _, w := range g.watchers {
if err := w.parse(); err != nil {
return err
}
return watchSession, nil
}
if logger.Enabled(ctx, slog.LevelError) {
logger.LogAttrs(ctx, slog.LevelError, "couldn't start watching", slog.String("dir", w.dir))
}
return watchSession, ErrUnableToStartWatching
return nil
}
func stopSession(session C.uintptr_t) {
success := C.stop_watcher(session)
if success == 0 && logger.Enabled(ctx, slog.LevelWarn) {
logger.LogAttrs(ctx, slog.LevelWarn, "couldn't close the watcher")
func (g *globalWatcher) stopWatching() {
close(g.stop)
for _, w := range g.watchers {
w.stop()
}
}
//export go_handle_file_watcher_event
func go_handle_file_watcher_event(path *C.char, associatedPath *C.char, eventType C.int, pathType C.int, handle C.uintptr_t) {
watchPattern := cgo.Handle(handle).Value().(*watchPattern)
handleWatcherEvent(watchPattern, C.GoString(path), C.GoString(associatedPath), int(eventType), int(pathType))
}
func handleWatcherEvent(watchPattern *watchPattern, path string, associatedPath string, eventType int, pathType int) {
// If the watcher prematurely sends the die@ event, retry watching
if pathType == 4 && strings.HasPrefix(path, "e/self/die@") && watcherIsActive.Load() {
retryWatching(watchPattern)
return
}
if watchPattern.allowReload(path, eventType, pathType) {
watchPattern.trigger <- path
return
}
// some editors create temporary files and never actually modify the original file
// so we need to also check the associated path of an event
// see https://github.com/php/frankenphp/issues/1375
if associatedPath != "" && watchPattern.allowReload(associatedPath, eventType, pathType) {
watchPattern.trigger <- associatedPath
}
}
func listenForFileEvents(triggerWatcher chan string, stopWatcher chan struct{}) {
func (g *globalWatcher) listenForFileEvents() {
timer := time.NewTimer(debounceDuration)
timer.Stop()
lastChangedFile := ""
eventsPerGroup := make(map[*PatternGroup][]*watcher.Event, len(g.groups))
defer timer.Stop()
for {
select {
case <-stopWatcher:
case lastChangedFile = <-triggerWatcher:
case <-g.stop:
return
case eh := <-g.events:
timer.Reset(debounceDuration)
eventsPerGroup[eh.patternGroup] = append(eventsPerGroup[eh.patternGroup], eh.event)
case <-timer.C:
timer.Stop()
if logger.Enabled(ctx, slog.LevelInfo) {
logger.LogAttrs(ctx, slog.LevelInfo, "filesystem change detected", slog.String("file", lastChangedFile))
if globalLogger.Enabled(globalCtx, slog.LevelInfo) {
var events []*watcher.Event
for _, eventList := range eventsPerGroup {
events = append(events, eventList...)
}
globalLogger.LogAttrs(globalCtx, slog.LevelInfo, "filesystem changes detected", slog.Any("events", events))
}
scheduleReload()
g.scheduleReload(eventsPerGroup)
eventsPerGroup = make(map[*PatternGroup][]*watcher.Event, len(g.groups))
}
}
}
func scheduleReload() {
func (g *globalWatcher) scheduleReload(eventsPerGroup map[*PatternGroup][]*watcher.Event) {
reloadWaitGroup.Add(1)
activeWatcher.callback()
// Call callbacks in order
for _, g := range g.groups {
if len(g.Patterns) == 0 {
g.Callback(nil)
}
if e, ok := eventsPerGroup[g]; ok {
g.Callback(e)
}
}
reloadWaitGroup.Done()
}

View File

@@ -1,6 +0,0 @@
#include <stdint.h>
#include <stdlib.h>
uintptr_t start_new_watcher(char const *const path, uintptr_t data);
int stop_watcher(uintptr_t watcher);

View File

@@ -5,14 +5,14 @@ package frankenphp
// #include <stdint.h>
// #include <php.h>
import "C"
import (
"unsafe"
)
type mercureContext struct {
}
//export go_mercure_publish
func go_mercure_publish(_ C.uintptr_t, _ *C.struct__zval_struct, _ unsafe.Pointer, _ bool, _, _ unsafe.Pointer, _ uint64) (*C.zend_string, C.short) {
func go_mercure_publish(threadIndex C.uintptr_t, topics *C.struct__zval_struct, data *C.zend_string, private bool, id, typ *C.zend_string, retry uint64) (generatedID *C.zend_string, error C.short) {
return nil, 3
}
func (w *worker) configureMercure(_ *workerOpt) {
}

View File

@@ -38,6 +38,7 @@ func go_mercure_publish(threadIndex C.uintptr_t, topics *C.struct__zval_struct,
Type: GoString(unsafe.Pointer(typ)),
},
Private: private,
Debug: fc.logger.Enabled(ctx, slog.LevelDebug),
}
zvalType := C.zval_get_type(topics)
@@ -71,6 +72,14 @@ func go_mercure_publish(threadIndex C.uintptr_t, topics *C.struct__zval_struct,
return (*C.zend_string)(PHPString(u.ID, false)), 0
}
// configureMercure copies the mercure.Hub from the worker options onto the
// worker, when one was provided.
func (w *worker) configureMercure(o *workerOpt) {
	if hub := o.mercureHub; hub != nil {
		w.mercureHub = hub
	}
}
// WithMercureHub sets the mercure.Hub to use to publish updates
func WithMercureHub(hub *mercure.Hub) RequestOption {
return func(o *frankenPHPContext) error {
@@ -79,3 +88,14 @@ func WithMercureHub(hub *mercure.Hub) RequestOption {
return nil
}
}
// WithWorkerMercureHub sets the mercure.Hub exposed to the worker script and
// used to dispatch hot reload-related mercure.Update messages.
func WithWorkerMercureHub(hub *mercure.Hub) WorkerOption {
	return func(w *workerOpt) error {
		w.mercureHub = hub
		// Also propagate the hub to every request handled by this worker.
		w.requestOptions = append(w.requestOptions, WithMercureHub(hub))
		return nil
	}
}

View File

@@ -20,6 +20,8 @@ type WorkerOption func(*workerOpt) error
//
// If you change this, also update the Caddy module and the documentation.
type opt struct {
hotReloadOpt
ctx context.Context
numThreads int
maxThreads int
@@ -31,6 +33,8 @@ type opt struct {
}
type workerOpt struct {
mercureContext
name string
fileName string
num int

View File

@@ -93,10 +93,15 @@ func TestTransitionAThreadBetween2DifferentWorkers(t *testing.T) {
// try all possible handler transitions
// takes around 200ms and is supposed to force race conditions
func TestTransitionThreadsWhileDoingRequests(t *testing.T) {
t.Cleanup(Shutdown)
var (
isDone atomic.Bool
wg sync.WaitGroup
)
numThreads := 10
numRequestsPerThread := 100
isDone := atomic.Bool{}
wg := sync.WaitGroup{}
worker1Path := testDataPath + "/transition-worker-1.php"
worker1Name := "worker-1"
worker2Path := testDataPath + "/transition-worker-2.php"
@@ -155,7 +160,6 @@ func TestTransitionThreadsWhileDoingRequests(t *testing.T) {
// we are finished as soon as all 1000 requests are done
wg.Wait()
isDone.Store(true)
Shutdown()
}
func TestFinishBootingAWorkerScript(t *testing.T) {

View File

@@ -11,6 +11,8 @@ import (
)
func TestScaleARegularThreadUpAndDown(t *testing.T) {
t.Cleanup(Shutdown)
assert.NoError(t, Init(
WithNumThreads(1),
WithMaxThreads(2),
@@ -25,14 +27,14 @@ func TestScaleARegularThreadUpAndDown(t *testing.T) {
assert.IsType(t, &regularThread{}, autoScaledThread.handler)
// on down-scale, the thread will be marked as inactive
setLongWaitTime(autoScaledThread)
setLongWaitTime(t, autoScaledThread)
deactivateThreads()
assert.IsType(t, &inactiveThread{}, autoScaledThread.handler)
Shutdown()
}
func TestScaleAWorkerThreadUpAndDown(t *testing.T) {
t.Cleanup(Shutdown)
workerName := "worker1"
workerPath := testDataPath + "/transition-worker-1.php"
assert.NoError(t, Init(
@@ -53,13 +55,13 @@ func TestScaleAWorkerThreadUpAndDown(t *testing.T) {
assert.Equal(t, state.Ready, autoScaledThread.state.Get())
// on down-scale, the thread will be marked as inactive
setLongWaitTime(autoScaledThread)
setLongWaitTime(t, autoScaledThread)
deactivateThreads()
assert.IsType(t, &inactiveThread{}, autoScaledThread.handler)
Shutdown()
}
func setLongWaitTime(thread *phpThread) {
// setLongWaitTime backdates the thread's recorded wait time by an hour so
// that a subsequent deactivateThreads call treats it as idle and downscales
// it immediately (see the scaling tests above).
func setLongWaitTime(t *testing.T, thread *phpThread) {
	t.Helper()
	thread.state.SetWaitTime(time.Now().Add(-time.Hour))
}

23
watcher-skip.go Normal file
View File

@@ -0,0 +1,23 @@
//go:build nowatcher
package frankenphp
import "errors"
// hotReloadOpt is an empty placeholder: hot reload requires the watcher,
// which is compiled out under the "nowatcher" build tag.
type hotReloadOpt struct {
}
// errWatcherNotEnabled is returned when the configuration requests file
// watching but the binary was built with the "nowatcher" tag.
var errWatcherNotEnabled = errors.New("watcher support is not enabled")
// initWatchers validates the watcher-related configuration in "nowatcher"
// builds: no watcher can be started, so it fails fast with
// errWatcherNotEnabled if any worker asks to watch files, and is a no-op
// otherwise.
func initWatchers(o *opt) error {
	// Use a distinct name for the loop variable: the original shadowed the
	// parameter o, which is error-prone when the body grows.
	for _, w := range o.workers {
		if len(w.watch) != 0 {
			return errWatcherNotEnabled
		}
	}

	return nil
}
// drainWatchers is a no-op in "nowatcher" builds: no watcher was ever
// started, so there is nothing to drain.
func drainWatchers() {
}

47
watcher.go Normal file
View File

@@ -0,0 +1,47 @@
//go:build !nowatcher
package frankenphp
import (
"sync/atomic"
"github.com/dunglas/frankenphp/internal/watcher"
watcherGo "github.com/e-dant/watcher/watcher-go"
)
// hotReloadOpt holds the hot reload configuration: one watcher.PatternGroup
// per set of watched patterns, each with its own callback.
type hotReloadOpt struct {
	hotReload []*watcher.PatternGroup
}
// restartWorkers is set by the per-worker watch callbacks when a filesystem
// change requires restarting workers; it is swapped back to false once per
// batch of events by the trailing patternless group in initWatchers.
var restartWorkers atomic.Bool
// initWatchers wires file watching for worker restarts and hot reload.
//
// One PatternGroup is built per worker that declares watch patterns; its
// callback only records that a restart is needed. A final group with no
// patterns is appended afterwards: patternless groups always have their
// callback invoked for a batch of events, so it runs after every worker
// callback and performs at most one RestartWorkers call per batch. The
// append order therefore matters.
func initWatchers(o *opt) error {
	watchPatterns := make([]*watcher.PatternGroup, 0, len(o.hotReload))
	for _, o := range o.workers {
		if len(o.watch) == 0 {
			continue
		}
		watcherIsEnabled = true
		watchPatterns = append(watchPatterns, &watcher.PatternGroup{Patterns: o.watch, Callback: func(_ []*watcherGo.Event) {
			// Only flag the restart here; the actual restart happens once,
			// in the patternless group below.
			restartWorkers.Store(true)
		}})
	}
	if watcherIsEnabled {
		watchPatterns = append(watchPatterns, &watcher.PatternGroup{
			Callback: func(_ []*watcherGo.Event) {
				// Swap resets the flag so a single batch of events triggers
				// at most one restart of all workers.
				if restartWorkers.Swap(false) {
					RestartWorkers()
				}
			},
		})
	}
	// Hot reload groups from the configuration are watched alongside the
	// worker-driven groups.
	return watcher.InitWatcher(globalCtx, globalLogger, append(watchPatterns, o.hotReload...))
}
// drainWatchers stops the underlying file watcher and waits for it to
// finish.
func drainWatchers() {
	watcher.DrainWatcher()
}

View File

@@ -11,6 +11,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// we have to wait a few milliseconds for the watcher debounce to take effect
@@ -41,6 +42,8 @@ func TestWorkersShouldNotReloadOnExcludingPattern(t *testing.T) {
}
func pollForWorkerReset(t *testing.T, handler func(http.ResponseWriter, *http.Request), limit int) bool {
t.Helper()
// first we make an initial request to start the request counter
body, _ := testGet("http://example.com/worker-with-counter.php", handler, t)
assert.Equal(t, "requests:1", body)
@@ -54,18 +57,19 @@ func pollForWorkerReset(t *testing.T, handler func(http.ResponseWriter, *http.Re
return true
}
}
return false
}
func updateTestFile(fileName string, content string, t *testing.T) {
absFileName, err := filepath.Abs(fileName)
assert.NoError(t, err)
require.NoError(t, err)
dirName := filepath.Dir(absFileName)
if _, err := os.Stat(dirName); os.IsNotExist(err) {
if _, err = os.Stat(dirName); os.IsNotExist(err) {
err = os.MkdirAll(dirName, 0700)
assert.NoError(t, err)
}
bytes := []byte(content)
err = os.WriteFile(absFileName, bytes, 0644)
assert.NoError(t, err)
require.NoError(t, err)
require.NoError(t, os.WriteFile(absFileName, []byte(content), 0644))
}

View File

@@ -14,11 +14,12 @@ import (
"github.com/dunglas/frankenphp/internal/fastabs"
"github.com/dunglas/frankenphp/internal/state"
"github.com/dunglas/frankenphp/internal/watcher"
)
// represents a worker script and can have many threads assigned to it
type worker struct {
mercureContext
name string
fileName string
num int
@@ -37,25 +38,31 @@ type worker struct {
var (
workers []*worker
watcherIsEnabled bool
startupFailChan chan (error)
startupFailChan chan error
)
func initWorkers(opt []workerOpt) error {
if len(opt) == 0 {
return nil
}
var (
workersReady sync.WaitGroup
totalThreadsToStart int
)
workers = make([]*worker, 0, len(opt))
directoriesToWatch := getDirectoriesToWatch(opt)
watcherIsEnabled = len(directoriesToWatch) > 0
totalThreadsToStart := 0
for _, o := range opt {
w, err := newWorker(o)
if err != nil {
return err
}
totalThreadsToStart += w.num
workers = append(workers, w)
}
var workersReady sync.WaitGroup
startupFailChan = make(chan error, totalThreadsToStart)
for _, w := range workers {
@@ -73,22 +80,13 @@ func initWorkers(opt []workerOpt) error {
select {
case err := <-startupFailChan:
// at least 1 worker has failed, shut down and return an error
Shutdown()
// at least 1 worker has failed, return an error
return fmt.Errorf("failed to initialize workers: %w", err)
default:
// all workers started successfully
startupFailChan = nil
}
if !watcherIsEnabled {
return nil
}
if err := watcher.InitWatcher(globalCtx, directoriesToWatch, RestartWorkers, globalLogger); err != nil {
return err
}
return nil
}
@@ -156,6 +154,8 @@ func newWorker(o workerOpt) (*worker, error) {
onThreadShutdown: o.onThreadShutdown,
}
w.configureMercure(&o)
w.requestOptions = append(
w.requestOptions,
WithRequestDocumentRoot(filepath.Dir(o.fileName), false),
@@ -175,39 +175,43 @@ func DrainWorkers() {
}
func drainWorkerThreads() []*phpThread {
ready := sync.WaitGroup{}
drainedThreads := make([]*phpThread, 0)
var (
ready sync.WaitGroup
drainedThreads []*phpThread
)
for _, worker := range workers {
worker.threadMutex.RLock()
ready.Add(len(worker.threads))
for _, thread := range worker.threads {
if !thread.state.RequestSafeStateChange(state.Restarting) {
ready.Done()
// no state change allowed == thread is shutting down
// we'll proceed to restart all other threads anyways
// we'll proceed to restart all other threads anyway
continue
}
close(thread.drainChan)
drainedThreads = append(drainedThreads, thread)
go func(thread *phpThread) {
thread.state.WaitFor(state.Yielding)
ready.Done()
}(thread)
}
worker.threadMutex.RUnlock()
}
ready.Wait()
return drainedThreads
}
// drainWatcher stops the file watcher, if one was started for the workers.
func drainWatcher() {
	if watcherIsEnabled {
		watcher.DrainWatcher()
	}
}
// RestartWorkers attempts to restart all workers gracefully
// All workers must be restarted at the same time to prevent issues with opcache resetting.
func RestartWorkers() {
// disallow scaling threads while restarting workers
scalingMu.Lock()
@@ -221,14 +225,6 @@ func RestartWorkers() {
}
}
// getDirectoriesToWatch flattens the watch patterns declared by every worker
// option into a single list. It always returns a non-nil slice, even when no
// worker watches anything.
func getDirectoriesToWatch(workerOpts []workerOpt) []string {
	dirs := make([]string, 0)
	for i := range workerOpts {
		dirs = append(dirs, workerOpts[i].watch...)
	}
	return dirs
}
func (worker *worker) attachThread(thread *phpThread) {
worker.threadMutex.Lock()
worker.threads = append(worker.threads, thread)

View File

@@ -10,6 +10,8 @@ import (
)
func TestWorkersExtension(t *testing.T) {
t.Cleanup(Shutdown)
readyWorkers := 0
shutdownWorkers := 0
serverStarts := 0