1
0
mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 13:29:11 +01:00

bump(vendor)

This commit is contained in:
Jan Chaloupka
2024-06-23 17:13:02 +02:00
parent 3a1a3ff9d8
commit 7d4ec60e2d
267 changed files with 67229 additions and 152 deletions

191
vendor/github.com/golang/glog/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

36
vendor/github.com/golang/glog/README.md generated vendored Normal file
View File

@@ -0,0 +1,36 @@
# glog
[![PkgGoDev](https://pkg.go.dev/badge/github.com/golang/glog)](https://pkg.go.dev/github.com/golang/glog)
Leveled execution logs for Go.
This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package [_glog_](https://github.com/google/glog).
By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. Through the `-vmodule` flag, the package also provides fine-grained
control over logging at the file level.
The comment from `glog.go` introduces the ideas:
Package _glog_ implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. It provides the functions Info, Warning, Error, Fatal, plus formatting variants such as Infof. It also provides V-style loggingcontrolled by the `-v` and `-vmodule=file=2` flags.
Basic examples:
```go
glog.Info("Prepare to repel boarders")
glog.Fatalf("Initialization failed: %s", err)
```
See the documentation for the V function for an explanation of these examples:
```go
if glog.V(2) {
glog.Info("Starting transaction...")
}
glog.V(2).Infoln("Processed", nItems, "elements")
```
The repository contains an open source version of the log package used inside Google. The master copy of the source lives inside Google, not here. The code in this repo is for export only and is not itself under development. Feature requests will be ignored.
Send bug reports to golang-nuts@googlegroups.com.

777
vendor/github.com/golang/glog/glog.go generated vendored Normal file
View File

@@ -0,0 +1,777 @@
// Go support for leveled logs, analogous to https://github.com/google/glog.
//
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
// It provides functions that have a name matched by regex:
//
// (Info|Warning|Error|Fatal)(Context)?(Depth)?(f)?
//
// If Context is present, function takes context.Context argument. The
// context is used to pass through the Trace Context to log sinks that can make use
// of it.
// It is recommended to use the context variant of the functions over the non-context
// variants if a context is available to make sure the Trace Contexts are present
// in logs.
//
// If Depth is present, this function calls log from a different depth in the call stack.
// This enables a callee to emit logs that use the callsite information of its caller
// or any other callers in the stack. When depth == 0, the original callee's line
// information is emitted. When depth > 0, depth frames are skipped in the call stack
// and the final frame is treated like the original callee to Info.
//
// If 'f' is present, function formats according to a format specifier.
//
// This package also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
//
// Basic examples:
//
// glog.Info("Prepare to repel boarders")
//
// glog.Fatalf("Initialization failed: %s", err)
//
// See the documentation for the V function for an explanation of these examples:
//
// if glog.V(2) {
// glog.Info("Starting transaction...")
// }
//
// glog.V(2).Infoln("Processed", nItems, "elements")
//
// Log output is buffered and written periodically using Flush. Programs
// should call Flush before exiting to guarantee all log output is written.
//
// By default, all log statements write to files in a temporary directory.
// This package provides several flags that modify this behavior.
// As a result, flag.Parse must be called before any logging is done.
//
// -logtostderr=false
// Logs are written to standard error instead of to files.
// -alsologtostderr=false
// Logs are written to standard error as well as to files.
// -stderrthreshold=ERROR
// Log events at or above this severity are logged to standard
// error as well as to files.
// -log_dir=""
// Log files will be written to this directory instead of the
// default temporary directory.
//
// Other flags provide aids to debugging.
//
// -log_backtrace_at=""
// A comma-separated list of file and line numbers holding a logging
// statement, such as
// -log_backtrace_at=gopherflakes.go:234
// A stack trace will be written to the Info log whenever execution
// hits one of these statements. (Unlike with -vmodule, the ".go"
// must bepresent.)
// -v=0
// Enable V-leveled logging at the specified level.
// -vmodule=""
// The syntax of the argument is a comma-separated list of pattern=N,
// where pattern is a literal file name (minus the ".go" suffix) or
// "glob" pattern and N is a V level. For instance,
// -vmodule=gopher*=3
// sets the V level to 3 in all Go files whose names begin with "gopher",
// and
// -vmodule=/path/to/glog/glog_test=1
// sets the V level to 1 in the Go file /path/to/glog/glog_test.go.
// If a glob pattern contains a slash, it is matched against the full path,
// and the file name. Otherwise, the pattern is
// matched only against the file's basename. When both -vmodule and -v
// are specified, the -vmodule values take precedence for the specified
// modules.
package glog
// This file contains the parts of the log package that are shared among all
// implementations (file, envelope, and appengine).
import (
"bytes"
"context"
"errors"
"fmt"
stdLog "log"
"os"
"reflect"
"runtime"
"runtime/pprof"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/golang/glog/internal/logsink"
"github.com/golang/glog/internal/stackdump"
)
var timeNow = time.Now // Stubbed out for testing.
// MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800
// ErrNoLog is the error we return if no log file has yet been created
// for the specified log type.
var ErrNoLog = errors.New("log file not yet created")
// OutputStats tracks the number of output lines and bytes written.
type OutputStats struct {
lines int64
bytes int64
}
// Lines returns the number of lines written.
func (s *OutputStats) Lines() int64 {
return atomic.LoadInt64(&s.lines)
}
// Bytes returns the number of bytes written.
func (s *OutputStats) Bytes() int64 {
return atomic.LoadInt64(&s.bytes)
}
// Stats tracks the number of lines of output and number of bytes
// per severity level. Values must be read with atomic.LoadInt64.
var Stats struct {
Info, Warning, Error OutputStats
}
var severityStats = [...]*OutputStats{
logsink.Info: &Stats.Info,
logsink.Warning: &Stats.Warning,
logsink.Error: &Stats.Error,
logsink.Fatal: nil,
}
// Level specifies a level of verbosity for V logs. The -v flag is of type
// Level and should be modified only through the flag.Value interface.
type Level int32
var metaPool sync.Pool // Pool of *logsink.Meta.
// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a
// pointer, allocating a new one if necessary. (Returning the interface value
// directly avoids an allocation if there was an existing pointer in the pool.)
func metaPoolGet() (any, *logsink.Meta) {
if metai := metaPool.Get(); metai != nil {
return metai, metai.(*logsink.Meta)
}
meta := new(logsink.Meta)
return meta, meta
}
type stack bool
const (
noStack = stack(false)
withStack = stack(true)
)
func appendBacktrace(depth int, format string, args []any) (string, []any) {
// Capture a backtrace as a stackdump.Stack (both text and PC slice).
// Structured log sinks can extract the backtrace in whichever format they
// prefer (PCs or text), and Text sinks will include it as just another part
// of the log message.
//
// Use depth instead of depth+1 so that the backtrace always includes the
// log function itself - otherwise the reason for the trace appearing in the
// log may not be obvious to the reader.
dump := stackdump.Caller(depth)
// Add an arg and an entry in the format string for the stack dump.
//
// Copy the "args" slice to avoid a rare but serious aliasing bug
// (corrupting the caller's slice if they passed it to a non-Fatal call
// using "...").
format = format + "\n\n%v\n"
args = append(append([]any(nil), args...), dump)
return format, args
}
// logf acts as ctxlogf, but doesn't expect a context.
func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) {
ctxlogf(nil, depth+1, severity, verbose, stack, format, args...)
}
// ctxlogf writes a log message for a log function call (or log function wrapper)
// at the given depth in the current goroutine's stack.
func ctxlogf(ctx context.Context, depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) {
now := timeNow()
_, file, line, ok := runtime.Caller(depth + 1)
if !ok {
file = "???"
line = 1
}
if stack == withStack || backtraceAt(file, line) {
format, args = appendBacktrace(depth+1, format, args)
}
metai, meta := metaPoolGet()
*meta = logsink.Meta{
Context: ctx,
Time: now,
File: file,
Line: line,
Depth: depth + 1,
Severity: severity,
Verbose: verbose,
Thread: int64(pid),
}
sinkf(meta, format, args...)
// Clear pointer fields so they can be garbage collected early.
meta.Context = nil
meta.Stack = nil
metaPool.Put(metai)
}
func sinkf(meta *logsink.Meta, format string, args ...any) {
meta.Depth++
n, err := logsink.Printf(meta, format, args...)
if stats := severityStats[meta.Severity]; stats != nil {
atomic.AddInt64(&stats.lines, 1)
atomic.AddInt64(&stats.bytes, int64(n))
}
if err != nil {
logsink.Printf(meta, "glog: exiting because of error: %s", err)
sinks.file.Flush()
os.Exit(2)
}
}
// CopyStandardLogTo arranges for messages written to the Go "log" package's
// default logs to also appear in the Google logs for the named and lower
// severities. Subsequent changes to the standard log's default output location
// or format may break this behavior.
//
// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
// recognized, CopyStandardLogTo panics.
func CopyStandardLogTo(name string) {
sev, err := logsink.ParseSeverity(name)
if err != nil {
panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err))
}
// Set a log format that captures the user's file and line:
// d.go:23: message
stdLog.SetFlags(stdLog.Lshortfile)
stdLog.SetOutput(logBridge(sev))
}
// NewStandardLogger returns a Logger that writes to the Google logs for the
// named and lower severities.
//
// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
// recognized, NewStandardLogger panics.
func NewStandardLogger(name string) *stdLog.Logger {
sev, err := logsink.ParseSeverity(name)
if err != nil {
panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err))
}
return stdLog.New(logBridge(sev), "", stdLog.Lshortfile)
}
// logBridge provides the Write method that enables CopyStandardLogTo to connect
// Go's standard logs to the logs provided by this package.
type logBridge logsink.Severity
// Write parses the standard logging line and passes its components to the
// logger for severity(lb).
func (lb logBridge) Write(b []byte) (n int, err error) {
var (
file = "???"
line = 1
text string
)
// Split "d.go:23: message" into "d.go", "23", and "message".
if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
text = fmt.Sprintf("bad log format: %s", b)
} else {
file = string(parts[0])
text = string(parts[2][1:]) // skip leading space
line, err = strconv.Atoi(string(parts[1]))
if err != nil {
text = fmt.Sprintf("bad line number: %s", b)
line = 1
}
}
// The depth below hard-codes details of how stdlog gets here. The alternative would be to walk
// up the stack looking for src/log/log.go but that seems like it would be
// unfortunately slow.
const stdLogDepth = 4
metai, meta := metaPoolGet()
*meta = logsink.Meta{
Time: timeNow(),
File: file,
Line: line,
Depth: stdLogDepth,
Severity: logsink.Severity(lb),
Thread: int64(pid),
}
format := "%s"
args := []any{text}
if backtraceAt(file, line) {
format, args = appendBacktrace(meta.Depth, format, args)
}
sinkf(meta, format, args...)
metaPool.Put(metai)
return len(b), nil
}
// defaultFormat returns a fmt.Printf format specifier that formats its
// arguments as if they were passed to fmt.Print.
func defaultFormat(args []any) string {
n := len(args)
switch n {
case 0:
return ""
case 1:
return "%v"
}
b := make([]byte, 0, n*3-1)
wasString := true // Suppress leading space.
for _, arg := range args {
isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String
if wasString || isString {
b = append(b, "%v"...)
} else {
b = append(b, " %v"...)
}
wasString = isString
}
return string(b)
}
// lnFormat returns a fmt.Printf format specifier that formats its arguments
// as if they were passed to fmt.Println.
func lnFormat(args []any) string {
if len(args) == 0 {
return "\n"
}
b := make([]byte, 0, len(args)*3)
for range args {
b = append(b, "%v "...)
}
b[len(b)-1] = '\n' // Replace the last space with a newline.
return string(b)
}
// Verbose is a boolean type that implements Infof (like Printf) etc.
// See the documentation of V for more information.
type Verbose bool
// V reports whether verbosity at the call site is at least the requested level.
// The returned value is a boolean of type Verbose, which implements Info, Infoln
// and Infof. These methods will write to the Info log if called.
// Thus, one may write either
//
// if glog.V(2) { glog.Info("log this") }
//
// or
//
// glog.V(2).Info("log this")
//
// The second form is shorter but the first is cheaper if logging is off because it does
// not evaluate its arguments.
//
// Whether an individual call to V generates a log record depends on the setting of
// the -v and --vmodule flags; both are off by default. If the level in the call to
// V is at most the value of -v, or of -vmodule for the source file containing the
// call, the V call will log.
func V(level Level) Verbose {
return VDepth(1, level)
}
// VDepth acts as V but uses depth to determine which call frame to check vmodule for.
// VDepth(0, level) is the same as V(level).
func VDepth(depth int, level Level) Verbose {
return Verbose(verboseEnabled(depth+1, level))
}
// Info is equivalent to the global Info function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) Info(args ...any) {
v.InfoDepth(1, args...)
}
// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) InfoDepth(depth int, args ...any) {
if v {
logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...)
}
}
// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) InfoDepthf(depth int, format string, args ...any) {
if v {
logf(depth+1, logsink.Info, true, noStack, format, args...)
}
}
// Infoln is equivalent to the global Infoln function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) Infoln(args ...any) {
if v {
logf(1, logsink.Info, true, noStack, lnFormat(args), args...)
}
}
// Infof is equivalent to the global Infof function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) Infof(format string, args ...any) {
if v {
logf(1, logsink.Info, true, noStack, format, args...)
}
}
// InfoContext is equivalent to the global InfoContext function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) InfoContext(ctx context.Context, args ...any) {
v.InfoContextDepth(ctx, 1, args...)
}
// InfoContextf is equivalent to the global InfoContextf function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) InfoContextf(ctx context.Context, format string, args ...any) {
if v {
ctxlogf(ctx, 1, logsink.Info, true, noStack, format, args...)
}
}
// InfoContextDepth is equivalent to the global InfoContextDepth function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) InfoContextDepth(ctx context.Context, depth int, args ...any) {
if v {
ctxlogf(ctx, depth+1, logsink.Info, true, noStack, defaultFormat(args), args...)
}
}
// InfoContextDepthf is equivalent to the global InfoContextDepthf function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) {
if v {
ctxlogf(ctx, depth+1, logsink.Info, true, noStack, format, args...)
}
}
// Info logs to the INFO log.
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Info(args ...any) {
InfoDepth(1, args...)
}
// InfoDepth calls Info from a different depth in the call stack.
// This enables a callee to emit logs that use the callsite information of its caller
// or any other callers in the stack. When depth == 0, the original callee's line
// information is emitted. When depth > 0, depth frames are skipped in the call stack
// and the final frame is treated like the original callee to Info.
func InfoDepth(depth int, args ...any) {
logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...)
}
// InfoDepthf acts as InfoDepth but with format string.
func InfoDepthf(depth int, format string, args ...any) {
logf(depth+1, logsink.Info, false, noStack, format, args...)
}
// Infoln logs to the INFO log.
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
func Infoln(args ...any) {
logf(1, logsink.Info, false, noStack, lnFormat(args), args...)
}
// Infof logs to the INFO log.
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Infof(format string, args ...any) {
logf(1, logsink.Info, false, noStack, format, args...)
}
// InfoContext is like [Info], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func InfoContext(ctx context.Context, args ...any) {
InfoContextDepth(ctx, 1, args...)
}
// InfoContextf is like [Infof], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func InfoContextf(ctx context.Context, format string, args ...any) {
ctxlogf(ctx, 1, logsink.Info, false, noStack, format, args...)
}
// InfoContextDepth is like [InfoDepth], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func InfoContextDepth(ctx context.Context, depth int, args ...any) {
ctxlogf(ctx, depth+1, logsink.Info, false, noStack, defaultFormat(args), args...)
}
// InfoContextDepthf is like [InfoDepthf], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) {
ctxlogf(ctx, depth+1, logsink.Info, false, noStack, format, args...)
}
// Warning logs to the WARNING and INFO logs.
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Warning(args ...any) {
WarningDepth(1, args...)
}
// WarningDepth acts as Warning but uses depth to determine which call frame to log.
// WarningDepth(0, "msg") is the same as Warning("msg").
func WarningDepth(depth int, args ...any) {
logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...)
}
// WarningDepthf acts as Warningf but uses depth to determine which call frame to log.
// WarningDepthf(0, "msg") is the same as Warningf("msg").
func WarningDepthf(depth int, format string, args ...any) {
logf(depth+1, logsink.Warning, false, noStack, format, args...)
}
// Warningln logs to the WARNING and INFO logs.
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
func Warningln(args ...any) {
logf(1, logsink.Warning, false, noStack, lnFormat(args), args...)
}
// Warningf logs to the WARNING and INFO logs.
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Warningf(format string, args ...any) {
logf(1, logsink.Warning, false, noStack, format, args...)
}
// WarningContext is like [Warning], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func WarningContext(ctx context.Context, args ...any) {
WarningContextDepth(ctx, 1, args...)
}
// WarningContextf is like [Warningf], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func WarningContextf(ctx context.Context, format string, args ...any) {
ctxlogf(ctx, 1, logsink.Warning, false, noStack, format, args...)
}
// WarningContextDepth is like [WarningDepth], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func WarningContextDepth(ctx context.Context, depth int, args ...any) {
ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...)
}
// WarningContextDepthf is like [WarningDepthf], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func WarningContextDepthf(ctx context.Context, depth int, format string, args ...any) {
ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, format, args...)
}
// Error logs to the ERROR, WARNING, and INFO logs.
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Error(args ...any) {
ErrorDepth(1, args...)
}
// ErrorDepth acts as Error but uses depth to determine which call frame to log.
// ErrorDepth(0, "msg") is the same as Error("msg").
func ErrorDepth(depth int, args ...any) {
logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...)
}
// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log.
// ErrorDepthf(0, "msg") is the same as Errorf("msg").
func ErrorDepthf(depth int, format string, args ...any) {
logf(depth+1, logsink.Error, false, noStack, format, args...)
}
// Errorln logs to the ERROR, WARNING, and INFO logs.
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
func Errorln(args ...any) {
logf(1, logsink.Error, false, noStack, lnFormat(args), args...)
}
// Errorf logs to the ERROR, WARNING, and INFO logs.
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Errorf(format string, args ...any) {
logf(1, logsink.Error, false, noStack, format, args...)
}
// ErrorContext is like [Error], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func ErrorContext(ctx context.Context, args ...any) {
ErrorContextDepth(ctx, 1, args...)
}
// ErrorContextf is like [Errorf], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func ErrorContextf(ctx context.Context, format string, args ...any) {
ctxlogf(ctx, 1, logsink.Error, false, noStack, format, args...)
}
// ErrorContextDepth is like [ErrorDepth], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func ErrorContextDepth(ctx context.Context, depth int, args ...any) {
ctxlogf(ctx, depth+1, logsink.Error, false, noStack, defaultFormat(args), args...)
}
// ErrorContextDepthf is like [ErrorDepthf], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func ErrorContextDepthf(ctx context.Context, depth int, format string, args ...any) {
ctxlogf(ctx, depth+1, logsink.Error, false, noStack, format, args...)
}
func ctxfatalf(ctx context.Context, depth int, format string, args ...any) {
ctxlogf(ctx, depth+1, logsink.Fatal, false, withStack, format, args...)
sinks.file.Flush()
err := abortProcess() // Should not return.
// Failed to abort the process using signals. Dump a stack trace and exit.
Errorf("abortProcess returned unexpectedly: %v", err)
sinks.file.Flush()
pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
os.Exit(2) // Exit with the same code as the default SIGABRT handler.
}
func fatalf(depth int, format string, args ...any) {
ctxfatalf(nil, depth+1, format, args...)
}
// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(2).
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Fatal(args ...any) {
FatalDepth(1, args...)
}
// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
// FatalDepth(0, "msg") is the same as Fatal("msg").
func FatalDepth(depth int, args ...any) {
fatalf(depth+1, defaultFormat(args), args...)
}
// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log.
// FatalDepthf(0, "msg") is the same as Fatalf("msg").
func FatalDepthf(depth int, format string, args ...any) {
fatalf(depth+1, format, args...)
}
// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(2).
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
func Fatalln(args ...any) {
fatalf(1, lnFormat(args), args...)
}
// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(2).
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Fatalf(format string, args ...any) {
fatalf(1, format, args...)
}
// FatalContext is like [Fatal], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func FatalContext(ctx context.Context, args ...any) {
FatalContextDepth(ctx, 1, args...)
}
// FatalContextf is like [Fatalf], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func FatalContextf(ctx context.Context, format string, args ...any) {
ctxfatalf(ctx, 1, format, args...)
}
// FatalContextDepth is like [FatalDepth], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func FatalContextDepth(ctx context.Context, depth int, args ...any) {
ctxfatalf(ctx, depth+1, defaultFormat(args), args...)
}
// FatalContextDepthf is like [FatalDepthf], but with an extra [context.Context] parameter.
func FatalContextDepthf(ctx context.Context, depth int, format string, args ...any) {
ctxfatalf(ctx, depth+1, format, args...)
}
func ctxexitf(ctx context.Context, depth int, format string, args ...any) {
ctxlogf(ctx, depth+1, logsink.Fatal, false, noStack, format, args...)
sinks.file.Flush()
os.Exit(1)
}
func exitf(depth int, format string, args ...any) {
ctxexitf(nil, depth+1, format, args...)
}
// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Exit(args ...any) {
ExitDepth(1, args...)
}
// ExitDepth acts as Exit but uses depth to determine which call frame to log.
// ExitDepth(0, "msg") is the same as Exit("msg").
func ExitDepth(depth int, args ...any) {
exitf(depth+1, defaultFormat(args), args...)
}
// ExitDepthf acts as Exitf but uses depth to determine which call frame to log.
// ExitDepthf(0, "msg") is the same as Exitf("msg").
func ExitDepthf(depth int, format string, args ...any) {
exitf(depth+1, format, args...)
}
// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
func Exitln(args ...any) {
exitf(1, lnFormat(args), args...)
}
// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Exitf(format string, args ...any) {
exitf(1, format, args...)
}
// ExitContext is like [Exit], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func ExitContext(ctx context.Context, args ...any) {
ExitContextDepth(ctx, 1, args...)
}
// ExitContextf is like [Exitf], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func ExitContextf(ctx context.Context, format string, args ...any) {
ctxexitf(ctx, 1, format, args...)
}
// ExitContextDepth is like [ExitDepth], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func ExitContextDepth(ctx context.Context, depth int, args ...any) {
ctxexitf(ctx, depth+1, defaultFormat(args), args...)
}
// ExitContextDepthf is like [ExitDepthf], but with an extra [context.Context] parameter. The
// context is used to pass the Trace Context to log sinks.
func ExitContextDepthf(ctx context.Context, depth int, format string, args ...any) {
ctxexitf(ctx, depth+1, format, args...)
}

420
vendor/github.com/golang/glog/glog_file.go generated vendored Normal file
View File

@@ -0,0 +1,420 @@
// Go support for leveled logs, analogous to https://github.com/google/glog.
//
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// File I/O for logs.
package glog
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/golang/glog/internal/logsink"
)
// logDirs lists the candidate directories for new log files.
var logDirs []string
var (
// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files")
logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+
" (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.")
)
func createLogDirs() {
if *logDir != "" {
logDirs = append(logDirs, *logDir)
}
logDirs = append(logDirs, os.TempDir())
}
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it is used to construct file paths.
userName = strings.Map(func(r rune) rune {
switch {
case r >= 'a' && r <= 'z':
case r >= 'A' && r <= 'Z':
case r >= '0' && r <= '9':
default:
return '_'
}
return r
}, userName)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag.
func logName(tag string, t time.Time) (name, link string) {
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
program,
host,
userName,
tag,
t.Year(),
t.Month(),
t.Day(),
t.Hour(),
t.Minute(),
t.Second(),
pid)
return name, program + "." + tag
}
var onceLogDirs sync.Once
// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
onceLogDirs.Do(createLogDirs)
if len(logDirs) == 0 {
return nil, "", errors.New("log: no log dirs")
}
name, link := logName(tag, t)
var lastErr error
for _, dir := range logDirs {
fname := filepath.Join(dir, name)
f, err := os.Create(fname)
if err == nil {
symlink := filepath.Join(dir, link)
os.Remove(symlink) // ignore err
os.Symlink(name, symlink) // ignore err
if *logLink != "" {
lsymlink := filepath.Join(*logLink, link)
os.Remove(lsymlink) // ignore err
os.Symlink(fname, lsymlink) // ignore err
}
return f, fname, nil
}
lastErr = err
}
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}
// flushSyncWriter is the interface satisfied by logging destinations.
type flushSyncWriter interface {
Flush() error
Sync() error
io.Writer
filenames() []string
}
var sinks struct {
stderr stderrSink
file fileSink
}
func init() {
// Register stderr first: that way if we crash during file-writing at least
// the log will have gone somewhere.
logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file)
sinks.file.flushChan = make(chan logsink.Severity, 1)
go sinks.file.flushDaemon()
}
// stderrSink is a logsink.Text that writes log entries to stderr
// if they meet certain conditions.
type stderrSink struct {
mu sync.Mutex
w io.Writer // if nil Emit uses os.Stderr directly
}
// Enabled implements logsink.Text.Enabled. It returns true if any of the
// various stderr flags are enabled for logs of the given severity, if the log
// message is from the standard "log" package, or if google.Init has not yet run
// (and hence file logging is not yet initialized).
func (s *stderrSink) Enabled(m *logsink.Meta) bool {
return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get()
}
// Emit implements logsink.Text.Emit.
func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) {
s.mu.Lock()
defer s.mu.Unlock()
w := s.w
if w == nil {
w = os.Stderr
}
dn, err := w.Write(data)
n += dn
return n, err
}
// severityWriters is an array of flushSyncWriter with a value for each
// logsink.Severity.
type severityWriters [4]flushSyncWriter
// fileSink is a logsink.Text that prints to a set of Google log files.
type fileSink struct {
mu sync.Mutex
// file holds writer for each of the log types.
file severityWriters
flushChan chan logsink.Severity
}
// Enabled implements logsink.Text.Enabled. It returns true if google.Init
// has run and both --disable_log_to_disk and --logtostderr are false.
func (s *fileSink) Enabled(m *logsink.Meta) bool {
return !toStderr
}
// Emit implements logsink.Text.Emit
func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) {
s.mu.Lock()
defer s.mu.Unlock()
if err = s.createMissingFiles(m.Severity); err != nil {
return 0, err
}
for sev := m.Severity; sev >= logsink.Info; sev-- {
if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil {
err = fErr // Take the first error.
}
}
n = len(data)
if int(m.Severity) > *logBufLevel {
select {
case s.flushChan <- m.Severity:
default:
}
}
return n, err
}
// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
// file's Sync method and providing a wrapper for the Write method that provides log
// file rotation. There are conflicting methods, so the file cannot be embedded.
// s.mu is held for all its methods.
type syncBuffer struct {
sink *fileSink
*bufio.Writer
file *os.File
names []string
sev logsink.Severity
nbytes uint64 // The number of bytes written to this file
}
func (sb *syncBuffer) Sync() error {
return sb.file.Sync()
}
func (sb *syncBuffer) Write(p []byte) (n int, err error) {
if sb.nbytes+uint64(len(p)) >= MaxSize {
if err := sb.rotateFile(time.Now()); err != nil {
return 0, err
}
}
n, err = sb.Writer.Write(p)
sb.nbytes += uint64(n)
return n, err
}
func (sb *syncBuffer) filenames() []string {
return sb.names
}
const footer = "\nCONTINUED IN NEXT FILE\n"
// rotateFile closes the syncBuffer's file and starts a new one.
func (sb *syncBuffer) rotateFile(now time.Time) error {
var err error
pn := "<none>"
file, name, err := create(sb.sev.String(), now)
if sb.file != nil {
// The current log file becomes the previous log at the end of
// this block, so save its name for use in the header of the next
// file.
pn = sb.file.Name()
sb.Flush()
// If there's an existing file, write a footer with the name of
// the next file in the chain, followed by the constant string
// \nCONTINUED IN NEXT FILE\n to make continuation detection simple.
sb.file.Write([]byte("Next log: "))
sb.file.Write([]byte(name))
sb.file.Write([]byte(footer))
sb.file.Close()
}
sb.file = file
sb.names = append(sb.names, name)
sb.nbytes = 0
if err != nil {
return err
}
sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
// Write header.
var buf bytes.Buffer
fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
fmt.Fprintf(&buf, "Running on machine: %s\n", host)
fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
fmt.Fprintf(&buf, "Previous log: %s\n", pn)
fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
n, err := sb.file.Write(buf.Bytes())
sb.nbytes += uint64(n)
return err
}
// bufferSize sizes the buffer associated with each log file. It's large
// so that log records can accumulate without the logging thread blocking
// on disk I/O. The flushDaemon will block instead.
const bufferSize = 256 * 1024
// createMissingFiles creates all the log files for severity from infoLog up to
// upTo that have not already been created.
// s.mu is held.
func (s *fileSink) createMissingFiles(upTo logsink.Severity) error {
if s.file[upTo] != nil {
return nil
}
now := time.Now()
// Files are created in increasing severity order, so we can be assured that
// if a high severity logfile exists, then so do all of lower severity.
for sev := logsink.Info; sev <= upTo; sev++ {
if s.file[sev] != nil {
continue
}
sb := &syncBuffer{
sink: s,
sev: sev,
}
if err := sb.rotateFile(now); err != nil {
return err
}
s.file[sev] = sb
}
return nil
}
// flushDaemon periodically flushes the log file buffers.
func (s *fileSink) flushDaemon() {
tick := time.NewTicker(30 * time.Second)
defer tick.Stop()
for {
select {
case <-tick.C:
s.Flush()
case sev := <-s.flushChan:
s.flush(sev)
}
}
}
// Flush flushes all pending log I/O.
func Flush() {
sinks.file.Flush()
}
// Flush flushes all the logs and attempts to "sync" their data to disk.
func (s *fileSink) Flush() error {
return s.flush(logsink.Info)
}
// flush flushes all logs of severity threshold or greater.
func (s *fileSink) flush(threshold logsink.Severity) error {
var firstErr error
updateErr := func(err error) {
if err != nil && firstErr == nil {
firstErr = err
}
}
// Remember where we flushed, so we can call sync without holding
// the lock.
var files []flushSyncWriter
func() {
s.mu.Lock()
defer s.mu.Unlock()
// Flush from fatal down, in case there's trouble flushing.
for sev := logsink.Fatal; sev >= threshold; sev-- {
if file := s.file[sev]; file != nil {
updateErr(file.Flush())
files = append(files, file)
}
}
}()
for _, file := range files {
updateErr(file.Sync())
}
return firstErr
}
// Names returns the names of the log files holding the FATAL, ERROR,
// WARNING, or INFO logs. Returns ErrNoLog if the log for the given
// level doesn't exist (e.g. because no messages of that level have been
// written). This may return multiple names if the log type requested
// has rolled over.
func Names(s string) ([]string, error) {
severity, err := logsink.ParseSeverity(s)
if err != nil {
return nil, err
}
sinks.file.mu.Lock()
defer sinks.file.mu.Unlock()
f := sinks.file.file[severity]
if f == nil {
return nil, ErrNoLog
}
return f.filenames(), nil
}

39
vendor/github.com/golang/glog/glog_file_linux.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
// Go support for leveled logs, analogous to https://github.com/google/glog.
//
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
package glog
import (
"errors"
"runtime"
"syscall"
)
// abortProcess attempts to kill the current process in a way that will dump the
// currently-running goroutines someplace useful (like stderr).
//
// It does this by sending SIGABRT to the current thread.
//
// If successful, abortProcess does not return.
func abortProcess() error {
runtime.LockOSThread()
if err := syscall.Tgkill(syscall.Getpid(), syscall.Gettid(), syscall.SIGABRT); err != nil {
return err
}
return errors.New("log: killed current thread with SIGABRT, but still running")
}

30
vendor/github.com/golang/glog/glog_file_other.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
// Go support for leveled logs, analogous to https://github.com/google/glog.
//
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !(unix || windows)
package glog
import (
"fmt"
"runtime"
)
// abortProcess returns an error on platforms that presumably don't support signals.
func abortProcess() error {
return fmt.Errorf("not sending SIGABRT (%s/%s does not support signals), falling back", runtime.GOOS, runtime.GOARCH)
}

53
vendor/github.com/golang/glog/glog_file_posix.go generated vendored Normal file
View File

@@ -0,0 +1,53 @@
// Go support for leveled logs, analogous to https://github.com/google/glog.
//
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build (unix || windows) && !linux
package glog
import (
"os"
"syscall"
"time"
)
// abortProcess attempts to kill the current process in a way that will dump the
// currently-running goroutines someplace useful (like stderr).
//
// It does this by sending SIGABRT to the current process. Unfortunately, the
// signal may or may not be delivered to the current thread; in order to do that
// portably, we would need to add a cgo dependency and call pthread_kill.
//
// If successful, abortProcess does not return.
func abortProcess() error {
p, err := os.FindProcess(os.Getpid())
if err != nil {
return err
}
if err := p.Signal(syscall.SIGABRT); err != nil {
return err
}
// Sent the signal. Now we wait for it to arrive and any SIGABRT handlers to
// run (and eventually terminate the process themselves).
//
// We could just "select{}" here, but there's an outside chance that would
// trigger the runtime's deadlock detector if there happen not to be any
// background goroutines running. So we'll sleep a while first to give
// the signal some time.
time.Sleep(10 * time.Second)
select {}
}

398
vendor/github.com/golang/glog/glog_flags.go generated vendored Normal file
View File

@@ -0,0 +1,398 @@
// Go support for leveled logs, analogous to https://github.com/google/glog.
//
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package glog
import (
"bytes"
"errors"
"flag"
"fmt"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/golang/glog/internal/logsink"
)
// modulePat contains a filter for the -vmodule flag.
// It holds a verbosity level and a file pattern to match.
type modulePat struct {
pattern string
literal bool // The pattern is a literal string
full bool // The pattern wants to match the full path
level Level
}
// match reports whether the file matches the pattern. It uses a string
// comparison if the pattern contains no metacharacters.
func (m *modulePat) match(full, file string) bool {
if m.literal {
if m.full {
return full == m.pattern
}
return file == m.pattern
}
if m.full {
match, _ := filepath.Match(m.pattern, full)
return match
}
match, _ := filepath.Match(m.pattern, file)
return match
}
// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
// that require filepath.Match to be called to match the pattern.
func isLiteral(pattern string) bool {
return !strings.ContainsAny(pattern, `\*?[]`)
}
// isFull reports whether the pattern matches the full file path, that is,
// whether it contains /.
func isFull(pattern string) bool {
return strings.ContainsRune(pattern, '/')
}
// verboseFlags represents the setting of the -v and -vmodule flags.
type verboseFlags struct {
// moduleLevelCache is a sync.Map storing the -vmodule Level for each V()
// call site, identified by PC. If there is no matching -vmodule filter,
// the cached value is exactly v. moduleLevelCache is replaced with a new
// Map whenever the -vmodule or -v flag changes state.
moduleLevelCache atomic.Value
// mu guards all fields below.
mu sync.Mutex
// v stores the value of the -v flag. It may be read safely using
// sync.LoadInt32, but is only modified under mu.
v Level
// module stores the parsed -vmodule flag.
module []modulePat
// moduleLength caches len(module). If greater than zero, it
// means vmodule is enabled. It may be read safely using sync.LoadInt32, but
// is only modified under mu.
moduleLength int32
}
// NOTE: For compatibility with the open-sourced v1 version of this
// package (github.com/golang/glog) we need to retain that flag.Level
// implements the flag.Value interface. See also go/log-vs-glog.
// String is part of the flag.Value interface.
func (l *Level) String() string {
return strconv.FormatInt(int64(l.Get().(Level)), 10)
}
// Get is part of the flag.Value interface.
func (l *Level) Get() any {
if l == &vflags.v {
// l is the value registered for the -v flag.
return Level(atomic.LoadInt32((*int32)(l)))
}
return *l
}
// Set is part of the flag.Value interface.
func (l *Level) Set(value string) error {
v, err := strconv.Atoi(value)
if err != nil {
return err
}
if l == &vflags.v {
// l is the value registered for the -v flag.
vflags.mu.Lock()
defer vflags.mu.Unlock()
vflags.moduleLevelCache.Store(&sync.Map{})
atomic.StoreInt32((*int32)(l), int32(v))
return nil
}
*l = Level(v)
return nil
}
// vModuleFlag is the flag.Value for the --vmodule flag.
type vModuleFlag struct{ *verboseFlags }
func (f vModuleFlag) String() string {
// Do not panic on the zero value.
// https://groups.google.com/g/golang-nuts/c/Atlr8uAjn6U/m/iId17Td5BQAJ.
if f.verboseFlags == nil {
return ""
}
f.mu.Lock()
defer f.mu.Unlock()
var b bytes.Buffer
for i, f := range f.module {
if i > 0 {
b.WriteRune(',')
}
fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
}
return b.String()
}
// Get returns nil for this flag type since the struct is not exported.
func (f vModuleFlag) Get() any { return nil }
var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3
func (f vModuleFlag) Set(value string) error {
var filter []modulePat
for _, pat := range strings.Split(value, ",") {
if len(pat) == 0 {
// Empty strings such as from a trailing comma can be ignored.
continue
}
patLev := strings.Split(pat, "=")
if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
return errVmoduleSyntax
}
pattern := patLev[0]
v, err := strconv.Atoi(patLev[1])
if err != nil {
return errors.New("syntax error: expect comma-separated list of filename=N")
}
// TODO: check syntax of filter?
filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)})
}
f.mu.Lock()
defer f.mu.Unlock()
f.module = filter
atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module)))
f.moduleLevelCache.Store(&sync.Map{})
return nil
}
func (f *verboseFlags) levelForPC(pc uintptr) Level {
if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok {
return level.(Level)
}
f.mu.Lock()
defer f.mu.Unlock()
level := Level(f.v)
fn := runtime.FuncForPC(pc)
file, _ := fn.FileLine(pc)
// The file is something like /a/b/c/d.go. We want just the d for
// regular matches, /a/b/c/d for full matches.
file = strings.TrimSuffix(file, ".go")
full := file
if slash := strings.LastIndex(file, "/"); slash >= 0 {
file = file[slash+1:]
}
for _, filter := range f.module {
if filter.match(full, file) {
level = filter.level
break // Use the first matching level.
}
}
f.moduleLevelCache.Load().(*sync.Map).Store(pc, level)
return level
}
func (f *verboseFlags) enabled(callerDepth int, level Level) bool {
if atomic.LoadInt32(&f.moduleLength) == 0 {
// No vmodule values specified, so compare against v level.
return Level(atomic.LoadInt32((*int32)(&f.v))) >= level
}
pcs := [1]uintptr{}
if runtime.Callers(callerDepth+2, pcs[:]) < 1 {
return false
}
frame, _ := runtime.CallersFrames(pcs[:]).Next()
return f.levelForPC(frame.Entry) >= level
}
// traceLocation represents an entry in the -log_backtrace_at flag.
type traceLocation struct {
file string
line int
}
var errTraceSyntax = errors.New("syntax error: expect file.go:234")
func parseTraceLocation(value string) (traceLocation, error) {
fields := strings.Split(value, ":")
if len(fields) != 2 {
return traceLocation{}, errTraceSyntax
}
file, lineStr := fields[0], fields[1]
if !strings.Contains(file, ".") {
return traceLocation{}, errTraceSyntax
}
line, err := strconv.Atoi(lineStr)
if err != nil {
return traceLocation{}, errTraceSyntax
}
if line < 0 {
return traceLocation{}, errors.New("negative value for line")
}
return traceLocation{file, line}, nil
}
// match reports whether the specified file and line matches the trace location.
// The argument file name is the full path, not the basename specified in the flag.
func (t traceLocation) match(file string, line int) bool {
if t.line != line {
return false
}
if i := strings.LastIndex(file, "/"); i >= 0 {
file = file[i+1:]
}
return t.file == file
}
func (t traceLocation) String() string {
return fmt.Sprintf("%s:%d", t.file, t.line)
}
// traceLocations represents the -log_backtrace_at flag.
// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456
// Note that unlike vmodule the file extension is included here.
type traceLocations struct {
mu sync.Mutex
locsLen int32 // Safe for atomic read without mu.
locs []traceLocation
}
func (t *traceLocations) String() string {
t.mu.Lock()
defer t.mu.Unlock()
var buf bytes.Buffer
for i, tl := range t.locs {
if i > 0 {
buf.WriteString(",")
}
buf.WriteString(tl.String())
}
return buf.String()
}
// Get always returns nil for this flag type since the struct is not exported
func (t *traceLocations) Get() any { return nil }
func (t *traceLocations) Set(value string) error {
var locs []traceLocation
for _, s := range strings.Split(value, ",") {
if s == "" {
continue
}
loc, err := parseTraceLocation(s)
if err != nil {
return err
}
locs = append(locs, loc)
}
t.mu.Lock()
defer t.mu.Unlock()
atomic.StoreInt32(&t.locsLen, int32(len(locs)))
t.locs = locs
return nil
}
func (t *traceLocations) match(file string, line int) bool {
if atomic.LoadInt32(&t.locsLen) == 0 {
return false
}
t.mu.Lock()
defer t.mu.Unlock()
for _, tl := range t.locs {
if tl.match(file, line) {
return true
}
}
return false
}
// severityFlag is an atomic flag.Value implementation for logsink.Severity.
type severityFlag int32
func (s *severityFlag) get() logsink.Severity {
return logsink.Severity(atomic.LoadInt32((*int32)(s)))
}
func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) }
func (s *severityFlag) Get() any { return s.get() }
func (s *severityFlag) Set(value string) error {
threshold, err := logsink.ParseSeverity(value)
if err != nil {
// Not a severity name. Try a raw number.
v, err := strconv.Atoi(value)
if err != nil {
return err
}
threshold = logsink.Severity(v)
if threshold < logsink.Info || threshold > logsink.Fatal {
return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal)
}
}
atomic.StoreInt32((*int32)(s), int32(threshold))
return nil
}
var (
vflags verboseFlags // The -v and -vmodule flags.
logBacktraceAt traceLocations // The -log_backtrace_at flag.
// Boolean flags. Not handled atomically because the flag.Value interface
// does not let us avoid the =true, and that shorthand is necessary for
// compatibility. TODO: does this matter enough to fix? Seems unlikely.
toStderr bool // The -logtostderr flag.
alsoToStderr bool // The -alsologtostderr flag.
stderrThreshold severityFlag // The -stderrthreshold flag.
)
// verboseEnabled returns whether the caller at the given depth should emit
// verbose logs at the given level, with depth 0 identifying the caller of
// verboseEnabled.
func verboseEnabled(callerDepth int, level Level) bool {
return vflags.enabled(callerDepth+1, level)
}
// backtraceAt returns whether the logging call at the given function and line
// should also emit a backtrace of the current call stack.
func backtraceAt(file string, line int) bool {
return logBacktraceAt.match(file, line)
}
func init() {
vflags.moduleLevelCache.Store(&sync.Map{})
flag.Var(&vflags.v, "v", "log level for V logs")
flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
stderrThreshold = severityFlag(logsink.Error)
flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files")
flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
}

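Not part of the vendored file — a minimal sketch of how the flags registered in the init function above are typically consumed by an application that links glog (the message strings and patterns here are illustrative assumptions):

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// flag.Parse picks up -v, -vmodule, -log_backtrace_at, -logtostderr,
	// -alsologtostderr and -stderrthreshold, all registered in init above.
	flag.Parse()
	defer glog.Flush()

	// Emitted only when -v=2, or when a -vmodule pattern such as
	// -vmodule=main=2 raises the verbosity for this file.
	if glog.V(2) {
		glog.Info("verbose details enabled")
	}
	glog.Warning("always logged once the flags are parsed")
}

A typical invocation would then look something like ./app -logtostderr -v=1 -vmodule=main=2 -log_backtrace_at=main.go:19 (the pattern and line number are, of course, illustrative).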
393
vendor/github.com/golang/glog/internal/logsink/logsink.go generated vendored Normal file
View File

@@ -0,0 +1,393 @@
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logsink
import (
"bytes"
"context"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog/internal/stackdump"
)
// MaxLogMessageLen is the limit on length of a formatted log message, including
// the standard line prefix and trailing newline.
//
// Chosen to match C++ glog.
const MaxLogMessageLen = 15000
// A Severity is a severity at which a message can be logged.
type Severity int8
// These constants identify the log levels in order of increasing severity.
// A message written to a high-severity log file is also written to each
// lower-severity log file.
const (
Info Severity = iota
Warning
Error
// Fatal contains logs written immediately before the process terminates.
//
// Sink implementations should not terminate the process themselves: the log
// package will perform any necessary cleanup and terminate the process as
// appropriate.
Fatal
)
func (s Severity) String() string {
switch s {
case Info:
return "INFO"
case Warning:
return "WARNING"
case Error:
return "ERROR"
case Fatal:
return "FATAL"
}
return fmt.Sprintf("%T(%d)", s, s)
}
// ParseSeverity returns the case-insensitive Severity value for the given string.
func ParseSeverity(name string) (Severity, error) {
name = strings.ToUpper(name)
for s := Info; s <= Fatal; s++ {
if s.String() == name {
return s, nil
}
}
return -1, fmt.Errorf("logsink: invalid severity %q", name)
}
// Meta is metadata about a logging call.
type Meta struct {
// The context with which the log call was made (or nil). If set, the context
// is only valid during the logsink.Structured.Printf call, it should not be
// retained.
Context context.Context
// Time is the time at which the log call was made.
Time time.Time
// File is the source file from which the log entry originates.
File string
// Line is the line offset within the source file.
Line int
// Depth is the number of stack frames between the logsink and the log call.
Depth int
Severity Severity
// Verbose indicates whether the call was made via "log.V". Log entries below
// the current verbosity threshold are not sent to the sink.
Verbose bool
// Thread ID. This can be populated with a thread ID from another source,
// such as a system we are importing logs from. In the normal case, this
// will be set to the process ID (PID), since Go doesn't have threads.
Thread int64
// Stack trace starting in the logging function. May be nil.
// A logsink should implement the StackWanter interface to request this.
//
// Even if WantStack returns false, this field may be set (e.g. if another
// sink wants a stack trace).
Stack *stackdump.Stack
}
// Structured is a logging destination that accepts structured data as input.
type Structured interface {
// Printf formats according to a fmt.Printf format specifier and writes a log
// entry. The precise result of formatting depends on the sink, but should
// aim for consistency with fmt.Printf.
//
// Printf returns the number of bytes occupied by the log entry, which
// may not be equal to the total number of bytes written.
//
// Printf returns any error encountered *if* it is severe enough that the log
// package should terminate the process.
//
// The sink must not modify the *Meta parameter, nor reference it after
// Printf has returned: it may be reused in subsequent calls.
Printf(meta *Meta, format string, a ...any) (n int, err error)
}
// StackWanter can be implemented by a logsink.Structured to indicate that it
// wants a stack trace to accompany at least some of the log messages it receives.
type StackWanter interface {
// WantStack returns true if the sink requires a stack trace for a log message
// with this metadata.
//
// NOTE: Returning true implies that meta.Stack will be non-nil. Returning
// false does NOT imply that meta.Stack will be nil.
WantStack(meta *Meta) bool
}
// Text is a logging destination that accepts pre-formatted log lines (instead of
// structured data).
type Text interface {
// Enabled returns whether this sink should output messages for the given
// Meta. If the sink returns false for a given Meta, the Printf function will
// not call Emit on it for the corresponding log message.
Enabled(*Meta) bool
// Emit writes a pre-formatted text log entry (including any applicable
// header) to the log. It returns the number of bytes occupied by the entry
// (which may differ from the length of the passed-in slice).
//
// Emit returns any error encountered *if* it is severe enough that the log
// package should terminate the process.
//
// The sink must not modify the *Meta parameter, nor reference it after
// Printf has returned: it may be reused in subsequent calls.
//
// NOTE: When developing a text sink, keep in mind the surface in which the
// logs will be displayed, and whether it's important that the sink be
// resistant to tampering in the style of b/211428300. Standard text sinks
// (like `stderrSink`) do not protect against this (e.g. by escaping
// characters) because the cases where they would show user-influenced bytes
// are vanishingly small.
Emit(*Meta, []byte) (n int, err error)
}
// bufs is a pool of *bytes.Buffer used in formatting log entries.
var bufs sync.Pool // Pool of *bytes.Buffer.
// textPrintf formats a text log entry and emits it to all specified Text sinks.
//
// The returned n is the maximum across all Emit calls.
// The returned err is the first non-nil error encountered.
// Sinks that are disabled by configuration should return (0, nil).
func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) {
// We expect at most file, stderr, and perhaps syslog. If there are more,
// we'll end up allocating - no big deal.
const maxExpectedTextSinks = 3
var noAllocSinks [maxExpectedTextSinks]Text
sinks := noAllocSinks[:0]
for _, s := range textSinks {
if s.Enabled(m) {
sinks = append(sinks, s)
}
}
if len(sinks) == 0 && m.Severity != Fatal {
return 0, nil // No TextSinks specified; don't bother formatting.
}
bufi := bufs.Get()
var buf *bytes.Buffer
if bufi == nil {
buf = bytes.NewBuffer(nil)
bufi = buf
} else {
buf = bufi.(*bytes.Buffer)
buf.Reset()
}
// Lmmdd hh:mm:ss.uuuuuu PID/GID file:line]
//
// The "PID" entry arguably ought to be TID for consistency with other
// environments, but TID is not meaningful in a Go program due to the
// multiplexing of goroutines across threads.
//
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
// It's worth about 3X. Fprintf is hard.
const severityChar = "IWEF"
buf.WriteByte(severityChar[m.Severity])
_, month, day := m.Time.Date()
hour, minute, second := m.Time.Clock()
twoDigits(buf, int(month))
twoDigits(buf, day)
buf.WriteByte(' ')
twoDigits(buf, hour)
buf.WriteByte(':')
twoDigits(buf, minute)
buf.WriteByte(':')
twoDigits(buf, second)
buf.WriteByte('.')
nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0')
buf.WriteByte(' ')
nDigits(buf, 7, uint64(m.Thread), ' ')
buf.WriteByte(' ')
{
file := m.File
if i := strings.LastIndex(file, "/"); i >= 0 {
file = file[i+1:]
}
buf.WriteString(file)
}
buf.WriteByte(':')
{
var tmp [19]byte
buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10))
}
buf.WriteString("] ")
msgStart := buf.Len()
fmt.Fprintf(buf, format, args...)
if buf.Len() > MaxLogMessageLen-1 {
buf.Truncate(MaxLogMessageLen - 1)
}
msgEnd := buf.Len()
if b := buf.Bytes(); b[len(b)-1] != '\n' {
buf.WriteByte('\n')
}
for _, s := range sinks {
sn, sErr := s.Emit(m, buf.Bytes())
if sn > n {
n = sn
}
if sErr != nil && err == nil {
err = sErr
}
}
if m.Severity == Fatal {
savedM := *m
fatalMessageStore(savedEntry{
meta: &savedM,
msg: buf.Bytes()[msgStart:msgEnd],
})
} else {
bufs.Put(bufi)
}
return n, err
}
const digits = "0123456789"
// twoDigits formats a zero-prefixed two-digit integer to buf.
func twoDigits(buf *bytes.Buffer, d int) {
buf.WriteByte(digits[(d/10)%10])
buf.WriteByte(digits[d%10])
}
// nDigits formats an n-digit integer to buf, padding with pad on the left. It
// assumes d != 0.
func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) {
var tmp [20]byte
cutoff := len(tmp) - n
j := len(tmp) - 1
for ; d > 0; j-- {
tmp[j] = digits[d%10]
d /= 10
}
for ; j >= cutoff; j-- {
tmp[j] = pad
}
j++
buf.Write(tmp[j:])
}
// Printf writes a log entry to all registered TextSinks in this package, then
// to all registered StructuredSinks.
//
// The returned n is the maximum across all Emit and Printf calls.
// The returned err is the first non-nil error encountered.
// Sinks that are disabled by configuration should return (0, nil).
func Printf(m *Meta, format string, args ...any) (n int, err error) {
m.Depth++
n, err = textPrintf(m, TextSinks, format, args...)
for _, sink := range StructuredSinks {
// TODO: Support TextSinks that implement StackWanter?
if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) {
if m.Stack == nil {
// First, try to find a stacktrace in args, otherwise generate one.
for _, arg := range args {
if stack, ok := arg.(stackdump.Stack); ok {
m.Stack = &stack
break
}
}
if m.Stack == nil {
stack := stackdump.Caller( /* skipDepth = */ m.Depth)
m.Stack = &stack
}
}
}
sn, sErr := sink.Printf(m, format, args...)
if sn > n {
n = sn
}
if sErr != nil && err == nil {
err = sErr
}
}
return n, err
}
// The sets of sinks to which logs should be written.
//
// These must only be modified during package init, and are read-only thereafter.
var (
// StructuredSinks is the set of Structured sink instances to which logs
// should be written.
StructuredSinks []Structured
// TextSinks is the set of Text sink instances to which logs should be
// written.
//
// These are registered separately from Structured sink implementations to
// avoid the need to repeat the work of formatting a message for each Text
// sink that writes it. The package-level Printf function writes to both sets
// independently, so a given log destination should only register a Structured
// *or* a Text sink (not both).
TextSinks []Text
)
type savedEntry struct {
meta *Meta
msg []byte
}
// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks.
//
// The purpose of this sink is to allow applications to intercept logging calls before they are
// serialized and sent to Text sinks. For example, if one needs to redact PII from logging
// arguments before they reach STDERR, one solution would be to do the redacting in a Structured
// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that
// StructuredTextWrapper instance. This is how one could set this up in their application:
//
// func init() {
//
// wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks}
// // sanitizersink will intercept logs and remove PII
// sanitizer := sanitizersink{Sink: &wrapper}
// logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer)
// logsink.TextSinks = nil
//
// }
type StructuredTextWrapper struct {
// TextSinks is the set of Text sinks that should receive logs from this
// StructuredTextWrapper instance.
TextSinks []Text
}
// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper.
func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) {
return textPrintf(meta, w.TextSinks, format, args...)
}

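Not part of the vendored file — a minimal, hypothetical sketch of a Text sink implementing the interface above. Because logsink is an internal package, a sink like this could only live inside the glog module itself; the writerSink name and its filtering behaviour are assumptions for illustration. Registration happens during package init, as the comment on the sink variables above requires.

package logsinkdemo

import (
	"io"
	"os"

	"github.com/golang/glog/internal/logsink"
)

// writerSink forwards pre-formatted log lines to an io.Writer.
type writerSink struct {
	w io.Writer
}

// Enabled skips verbose (V-level) entries and accepts everything else.
func (s *writerSink) Enabled(m *logsink.Meta) bool { return !m.Verbose }

// Emit writes the formatted line and reports how many bytes it occupies.
func (s *writerSink) Emit(m *logsink.Meta, b []byte) (n int, err error) {
	return s.w.Write(b)
}

func init() {
	// Sinks must only be registered during package init.
	logsink.TextSinks = append(logsink.TextSinks, &writerSink{w: os.Stderr})
}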
35
vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go generated vendored Normal file
View File

@@ -0,0 +1,35 @@
package logsink
import (
"sync/atomic"
"unsafe"
)
func fatalMessageStore(e savedEntry) {
// Only put a new one in if we haven't assigned before.
atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e))
}
var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer
// FatalMessage returns the Meta and message contents of the first message
// logged with Fatal severity, or false if none has occurred.
func FatalMessage() (*Meta, []byte, bool) {
e := (*savedEntry)(atomic.LoadPointer(&fatalMessage))
if e == nil {
return nil, nil, false
}
return e.meta, e.msg, true
}
// DoNotUseRacyFatalMessage is FatalMessage, but worse.
//
//go:norace
//go:nosplit
func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) {
e := (*savedEntry)(fatalMessage)
if e == nil {
return nil, nil, false
}
return e.meta, e.msg, true
}

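Not part of the vendored file — a minimal sketch of how FatalMessage might be consumed, for example by a crash-reporting hook that runs while the process is shutting down after a Fatal log. As above, logsink is internal to glog, so this only illustrates the call shape; the package and function names are assumptions.

package crashhook

import (
	"fmt"
	"os"

	"github.com/golang/glog/internal/logsink"
)

// reportFatal prints the first Fatal-severity entry recorded by the log
// package, if any was stored via fatalMessageStore.
func reportFatal() {
	meta, msg, ok := logsink.FatalMessage()
	if !ok {
		return // No Fatal-severity message has been logged.
	}
	fmt.Fprintf(os.Stderr, "fatal at %s:%d: %s\n", meta.File, meta.Line, msg)
}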
127
vendor/github.com/golang/glog/internal/stackdump/stackdump.go generated vendored Normal file
View File

@@ -0,0 +1,127 @@
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package stackdump provides wrappers for runtime.Stack and runtime.Callers
// with uniform support for skipping caller frames.
//
// ⚠ Unlike the functions in the runtime package, these may allocate a
// non-trivial quantity of memory: use them with care. ⚠
package stackdump
import (
"bytes"
"runtime"
)
// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to
// runtime.Stack itself or 0 if it does not.
//
// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc
// compiler does not.
var runtimeStackSelfFrames = func() int {
for n := 1 << 10; n < 1<<20; n *= 2 {
buf := make([]byte, n)
n := runtime.Stack(buf, false)
if bytes.Contains(buf[:n], []byte("runtime.Stack")) {
return 1
} else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 {
return 0
}
}
return 0
}()
// Stack is a stack dump for a single goroutine.
type Stack struct {
// Text is a representation of the stack dump in a human-readable format.
Text []byte
// PC is a representation of the stack dump using raw program counter values.
PC []uintptr
}
func (s Stack) String() string { return string(s.Text) }
// Caller returns the Stack dump for the calling goroutine, starting skipDepth
// frames before the caller of Caller. (Caller(0) provides a dump starting at
// the caller of this function.)
func Caller(skipDepth int) Stack {
return Stack{
Text: CallerText(skipDepth + 1),
PC: CallerPC(skipDepth + 1),
}
}
// CallerText returns a textual dump of the stack starting skipDepth frames before
// the caller. (CallerText(0) provides a dump starting at the caller of this
// function.)
func CallerText(skipDepth int) []byte {
for n := 1 << 10; ; n *= 2 {
buf := make([]byte, n)
n := runtime.Stack(buf, false)
if n < len(buf) {
return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n])
}
}
}
// CallerPC returns a dump of the program counters of the stack starting
// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at
// the caller of this function.)
func CallerPC(skipDepth int) []uintptr {
for n := 1 << 8; ; n *= 2 {
buf := make([]uintptr, n)
n := runtime.Callers(skipDepth+2, buf)
if n < len(buf) {
return buf[:n]
}
}
}
// pruneFrames removes the topmost skipDepth frames of the first goroutine in a
// textual stack dump. It overwrites the passed-in slice.
//
// If there are fewer than skipDepth frames in the first goroutine's stack,
// pruneFrames prunes it to an empty stack and leaves the remaining contents
// intact.
func pruneFrames(skipDepth int, stack []byte) []byte {
headerLen := 0
for i, c := range stack {
if c == '\n' {
headerLen = i + 1
break
}
}
if headerLen == 0 {
return stack // No header line - not a well-formed stack trace.
}
skipLen := headerLen
skipNewlines := skipDepth * 2
for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ {
c := stack[skipLen]
if c != '\n' {
continue
}
skipNewlines--
skipLen++
if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' {
break
}
}
pruned := stack[skipLen-headerLen:]
copy(pruned, stack[:headerLen])
return pruned
}
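
Not part of the vendored file — a minimal sketch of the stackdump API in use. Again, the package is internal to glog, so this only illustrates the call shape; the surrounding package and function names are assumptions.

package stackdemo

import (
	"fmt"

	"github.com/golang/glog/internal/stackdump"
)

func logCurrentStack() {
	// Caller(0) starts the dump at the caller of Caller, i.e. this function.
	s := stackdump.Caller(0)
	fmt.Println(s)         // human-readable text form (Stack.Text)
	fmt.Println(len(s.PC)) // number of raw program counters captured
}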