2020-01-08 07:10:37 +00:00
|
|
|
package rate
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"net/http"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
|
|
|
"time"
|
|
|
|
|
2020-10-28 22:39:59 +00:00
|
|
|
"github.com/diamondburned/arikawa/v2/internal/moreatomic"
|
2020-10-28 21:31:15 +00:00
|
|
|
"github.com/pkg/errors"
|
2020-01-08 07:10:37 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// ExtraDelay is padding added to every reset deadline to absorb the
// imprecision of Discord's rate limit headers. The same workaround appears in
// both litcord and discordgo, the latter claiming it was found by experiment.
const ExtraDelay = 250 * time.Millisecond
|
|
|
|
|
2020-11-25 20:08:42 +00:00
|
|
|
// ErrTimedOutEarly is the error returned by Limiter.Acquire if the wait
// mandated by a rate limit would exceed the deadline of the context.Context,
// meaning waiting it out could never succeed.
var ErrTimedOutEarly = errors.New("rate: rate limit exceeds context deadline")
|
|
|
|
|
2020-01-08 07:10:37 +00:00
|
|
|
// Limiter is a client-side rate limiter for Discord's REST API. It keeps one
// bucket per parsed route plus a single global deadline. Modeled after
// discordgo's limiter:
// https://github.com/bwmarrin/discordgo/blob/master/ratelimit.go
type Limiter struct {
	// CustomLimits are substring-matched overrides; only the first match is
	// attached to a bucket. Only 1 per bucket.
	CustomLimits []*CustomRateLimit

	// Prefix is trimmed from request paths before bucket keys are derived.
	Prefix string

	global int64 // atomic guarded, unixnano

	// bucketMu guards the buckets map only; each bucket carries its own
	// context-aware lock for serializing requests.
	bucketMu sync.Mutex
	buckets  map[string]*bucket
}
|
|
|
|
|
|
|
|
// CustomRateLimit overrides Discord's rate limit headers for any route whose
// parsed bucket key contains the given substring.
type CustomRateLimit struct {
	// Contains is matched as a substring against the parsed bucket key.
	Contains string
	// Reset is the fixed interval between resets for matching routes.
	Reset time.Duration
}
|
|
|
|
|
|
|
|
// bucket holds the rate limit state of a single route.
type bucket struct {
	// lock serializes requests on this route; it is context-aware so a
	// blocked Acquire can be cancelled.
	lock   moreatomic.CtxMutex
	custom *CustomRateLimit

	// remaining is how many requests may still be made before reset.
	remaining uint64

	reset     time.Time
	lastReset time.Time // only for custom
}
|
|
|
|
|
2020-05-06 07:40:26 +00:00
|
|
|
func newBucket() *bucket {
|
|
|
|
return &bucket{
|
|
|
|
lock: *moreatomic.NewCtxMutex(),
|
|
|
|
remaining: 1,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-09 20:54:16 +00:00
|
|
|
func NewLimiter(prefix string) *Limiter {
|
2020-01-08 07:10:37 +00:00
|
|
|
return &Limiter{
|
2020-02-09 20:54:16 +00:00
|
|
|
Prefix: prefix,
|
2020-12-12 01:58:52 +00:00
|
|
|
buckets: map[string]*bucket{},
|
2020-01-19 17:52:43 +00:00
|
|
|
CustomLimits: []*CustomRateLimit{},
|
2020-01-08 07:10:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (l *Limiter) getBucket(path string, store bool) *bucket {
|
2020-02-09 20:54:16 +00:00
|
|
|
path = ParseBucketKey(strings.TrimPrefix(path, l.Prefix))
|
2020-01-08 18:43:15 +00:00
|
|
|
|
2020-12-12 01:58:52 +00:00
|
|
|
l.bucketMu.Lock()
|
|
|
|
defer l.bucketMu.Unlock()
|
|
|
|
|
|
|
|
bc, ok := l.buckets[path]
|
2020-01-08 07:10:37 +00:00
|
|
|
if !ok && !store {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if !ok {
|
2020-05-06 07:40:26 +00:00
|
|
|
bc := newBucket()
|
2020-01-08 07:10:37 +00:00
|
|
|
|
|
|
|
for _, limit := range l.CustomLimits {
|
|
|
|
if strings.Contains(path, limit.Contains) {
|
|
|
|
bc.custom = limit
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-12 01:58:52 +00:00
|
|
|
l.buckets[path] = bc
|
2020-01-08 07:10:37 +00:00
|
|
|
return bc
|
|
|
|
}
|
|
|
|
|
2020-12-12 01:58:52 +00:00
|
|
|
return bc
|
2020-01-08 07:10:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (l *Limiter) Acquire(ctx context.Context, path string) error {
|
|
|
|
b := l.getBucket(path, true)
|
|
|
|
|
2020-05-06 07:32:21 +00:00
|
|
|
if err := b.lock.Lock(ctx); err != nil {
|
2020-01-08 07:10:37 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-11-25 20:08:42 +00:00
|
|
|
// Deadline until the limiter is released.
|
|
|
|
until := time.Time{}
|
|
|
|
now := time.Now()
|
2020-01-08 07:10:37 +00:00
|
|
|
|
2020-11-25 20:08:42 +00:00
|
|
|
if b.remaining == 0 && b.reset.After(now) {
|
2020-01-08 07:10:37 +00:00
|
|
|
// out of turns, gotta wait
|
2020-11-25 20:08:42 +00:00
|
|
|
until = b.reset
|
2020-01-08 07:10:37 +00:00
|
|
|
} else {
|
|
|
|
// maybe global rate limit has it
|
2020-12-12 01:58:52 +00:00
|
|
|
until = time.Unix(0, atomic.LoadInt64(&l.global))
|
2020-11-25 20:08:42 +00:00
|
|
|
}
|
2020-01-08 07:10:37 +00:00
|
|
|
|
2020-11-25 20:08:42 +00:00
|
|
|
if until.After(now) {
|
|
|
|
if deadline, ok := ctx.Deadline(); ok && until.After(deadline) {
|
|
|
|
return ErrTimedOutEarly
|
2020-01-08 07:10:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
2020-02-09 20:54:16 +00:00
|
|
|
b.lock.Unlock()
|
2020-01-08 07:10:37 +00:00
|
|
|
return ctx.Err()
|
2020-11-25 20:08:42 +00:00
|
|
|
case <-time.After(until.Sub(now)):
|
2020-01-08 07:10:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if b.remaining > 0 {
|
|
|
|
b.remaining--
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Release releases the URL from the locks. This doesn't need a context for
// timing out, since it doesn't block that much. It updates the bucket's
// state from the given response headers (which may be nil) and unlocks the
// bucket that Acquire locked.
func (l *Limiter) Release(path string, headers http.Header) error {
	// Do not create a bucket: if none exists, Acquire was never called.
	b := l.getBucket(path, false)
	if b == nil {
		return nil
	}

	// TryUnlock because Release may be called when Acquire has not been.
	defer b.lock.TryUnlock()

	// Check custom limiter. Custom limits ignore Discord's headers entirely
	// and reset on their own fixed interval.
	if b.custom != nil {
		now := time.Now()

		if now.Sub(b.lastReset) >= b.custom.Reset {
			b.lastReset = now
			b.reset = now.Add(b.custom.Reset)
		}

		return nil
	}

	// Check if headers is nil or not:
	if headers == nil {
		return nil
	}

	var (
		// boolean
		global = headers.Get("X-RateLimit-Global")

		// seconds
		remaining  = headers.Get("X-RateLimit-Remaining")
		reset      = headers.Get("X-RateLimit-Reset") // float
		retryAfter = headers.Get("Retry-After")
	)

	switch {
	// Retry-After takes precedence: presumably set on 429 responses —
	// TODO confirm against Discord's documented header semantics.
	case retryAfter != "":
		i, err := strconv.Atoi(retryAfter)
		if err != nil {
			return errors.Wrapf(err, "invalid retryAfter %q", retryAfter)
		}

		at := time.Now().Add(time.Duration(i) * time.Second)

		if global != "" { // probably true
			// Global limit: publish the deadline for all buckets to see.
			atomic.StoreInt64(&l.global, at.UnixNano())
		} else {
			b.reset = at
		}

	case reset != "":
		// X-RateLimit-Reset is a unix timestamp with fractional seconds.
		unix, err := strconv.ParseFloat(reset, 64)
		if err != nil {
			return errors.Wrap(err, "invalid reset "+reset)
		}

		// Split into whole seconds and nanoseconds for time.Unix, then pad
		// with ExtraDelay to absorb header imprecision.
		sec := int64(unix)
		nsec := int64((unix - float64(sec)) * float64(time.Second))

		b.reset = time.Unix(sec, nsec).Add(ExtraDelay)
	}

	if remaining != "" {
		u, err := strconv.ParseUint(remaining, 10, 64)
		if err != nil {
			return errors.Wrap(err, "invalid remaining "+remaining)
		}

		b.remaining = u
	}

	return nil
}
|