
add a lot

master
realxlfd 4 weeks ago
parent
commit
d7f9615fdf
  1. README.MD (33)
  2. go.mod (5)
  3. go.sum (11)
  4. net/apps/multithread_downloader/control.go (21)
  5. net/apps/multithread_downloader/distributor.go (11)
  6. net/apps/multithread_downloader/downloader.go (14)
  7. proc/thread/distributor/distri.go (67)
  8. proc/thread/distributor/distributor_test.go (108)
  9. proc/thread/pool/closure_pool.go (114)
  10. proc/thread/pool/pool.go (78)
  11. proc/thread/pool/pool_test.go (66)
  12. proc/threadpool/pool.go (145)
  13. utils/containers/queue/queue.go (2)
  14. utils/containers/queue/sync_queue.go (7)
  15. utils/containers/queue/sync_queue_test.go (41)
  16. utils/str/index.go (8)
  17. utils/str/str_test.go (13)

33
README.MD

@ -6,35 +6,4 @@
## Overview
- `cli` command-line tools
  - `git.realxlfd.cc/RealXLFD/cli/ui` some common UI configuration
  - `git.realxlfd.cc/RealXLFD/cli/spinner` spinner, cui-compatible
  - `git.realxlfd.cc/RealXLFD/cli/logger` logging
- `cliapps` Go bindings for command-line applications
  - `git.realxlfd.cc/RealXLFD/ffmpeg` Go bindings for the ffmpeg CLI
  - `git.realxlfd.cc/RealXLFD/vips` Go bindings for the vips CLI
- `net` networking tools
  - `apis` Go bindings for various web application APIs
    - `git.realxlfd.cc/RealXLFD/net/apis/tmdb` Go bindings for The Movie Database API, with a built-in web crawler
    - `git.realxlfd.cc/RealXLFD/net/apis/openai` Go bindings for the OpenAI API
    - `git.realxlfd.cc/RealXLFD/net/apis/skysnow` crawler for the 天雪 site (in development)
  - `utils` general networking utilities
    - `git.realxlfd.cc/RealXLFD/net/utils/cookie` deserialize Set-Cookie headers or serialize cookies
    - `git.realxlfd.cc/RealXLFD/net/utils/crawler` automation tool based on ChromeWebDriver and Selenium
    - `git.realxlfd.cc/RealXLFD/net/utils/urlbuilder` URL builder
  - `apps`
    - `git.realxlfd.cc/RealXLFD/net/apps/multithread_downloader` multi-threaded downloader
    - `git.realxlfd.cc/RealXLFD/net/apps/reverse_proxy` reverse proxy
- `utils` general-purpose utilities
  - `git.realxlfd.cc/RealXLFD/utils/file/str` advanced string and sequence processing
  - `file` file operations
    - `git.realxlfd.cc/RealXLFD/utils/file/dcom` multi-password archive extraction tool
    - `git.realxlfd.cc/RealXLFD/utils/file/gozip` zip compression
    - `git.realxlfd.cc/RealXLFD/utils/file/fscan` file scanner
    - `git.realxlfd.cc/RealXLFD/utils/file/fos` file operations (move, copy, delete empty folders, ...)
- `proc` processing pipelines
  - `git.realxlfd.cc/RealXLFD/utils/threadpool` thread pool
For personal use; too lazy to write more.

5
go.mod

@ -7,8 +7,8 @@ require (
github.com/dustin/go-humanize v1.0.1
github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203
github.com/gookit/color v1.5.4
github.com/k0kubun/pp/v3 v3.2.0
github.com/parnurzeal/gorequest v0.3.0
github.com/rs/xid v1.5.0
github.com/stretchr/testify v1.8.4
github.com/tebeka/selenium v0.9.9
)
@ -19,8 +19,6 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/elazarl/goproxy v0.0.0-20231117061959-7cc037d33fb5 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/moul/http2curl v1.0.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@ -28,6 +26,5 @@ require (
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect
golang.org/x/net v0.22.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

11
go.sum

@ -54,12 +54,6 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/k0kubun/pp/v3 v3.2.0 h1:h33hNTZ9nVFNP3u2Fsgz8JXiF5JINoZfFq4SvKJwNcs=
github.com/k0kubun/pp/v3 v3.2.0/go.mod h1:ODtJQbQcIRfAD3N+theGCV1m/CBxweERz2dapdz1EwA=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI=
@ -69,6 +63,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
@ -134,7 +130,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
@ -150,8 +145,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

21
net/apps/multithread_downloader/control.go

@ -59,24 +59,3 @@ func (c *Client) Download(
c.tasks = append(c.tasks, downloader)
return downloader, nil
}
func (distri *Distributor) routine() uint {
for distri.running {
select {
case acquire := <-distri.acquire:
switch free := distri.MaxThread - distri.currentThread; {
case free >= acquire:
distri.currentThread += acquire
distri.getter <- acquire
case free <= 0:
release := <-distri.release
distri.getter <- release
default:
distri.currentThread += free
}
case release := <-distri.release:
}
}
}

11
net/apps/multithread_downloader/distributor.go

@ -1,11 +0,0 @@
package mdown
type Distributor struct {
running bool
MaxThread uint
MaxThreadPerReq uint
currentThread uint
release chan uint
acquire chan uint
getter chan uint
}

14
net/apps/multithread_downloader/downloader.go

@ -1,10 +1,10 @@
package mdown
import (
`net/http`
`net/url`
`os`
`strconv`
"net/http"
"net/url"
"os"
"strconv"
)
type Downloader struct {
@ -66,10 +66,6 @@ func (d *Downloader) head(distri *Distributor) {
acceptRanges := resp.Header.Get("accept-ranges")
d.AcceptRanges = acceptRanges == "bytes"
d.State = WAITING
client.acquire(client.)
client.acquire(client)
d.generator()
}
func (d *Downloader) generator(thread uint) {
}
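The `head` step above is what decides whether the server supports ranged requests before the download is split across threads. A standalone sketch of that probe (not part of the commit; the URL is a placeholder) using only the standard library:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Probe whether the server supports ranged requests: the precondition
	// for splitting a download across several goroutines.
	resp, err := http.Head("https://example.com/file.bin") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	ranged := resp.Header.Get("Accept-Ranges") == "bytes"
	fmt.Printf("ranged requests supported: %v, size: %d bytes\n", ranged, resp.ContentLength)
}
```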

67
proc/thread/distributor/distri.go

@ -0,0 +1,67 @@
package distri
import (
"sync"
)
type Distributor struct {
running bool
MaxThread int
free int
lock sync.Mutex
cond sync.Cond
}
func New(max int) *Distributor {
d := &Distributor{
running:   false,
MaxThread: max,
free:      max,
}
// the condition variable must use the distributor's mutex,
// otherwise Wait panics on a nil Locker
d.cond = sync.Cond{L: &d.lock}
return d
}
func (d *Distributor) Run() *Distributor {
d.running = true
return d
}
func (d *Distributor) Acquire(count int) int {
d.lock.Lock()
// unlock must be deferred before any early return, otherwise the mutex leaks
defer d.lock.Unlock()
if !d.running {
return 0
}
for d.free <= 0 {
d.cond.Wait()
if !d.running {
return 0
}
}
var acquired int
switch {
case d.free <= count:
acquired = d.free
d.free = 0
case d.free > count:
d.free -= count
acquired = count
}
return acquired
}
func (d *Distributor) Release(count int) {
d.lock.Lock()
defer d.lock.Unlock()
if !d.running {
return
}
d.free += count
d.cond.Signal()
}
func (d *Distributor) Close() {
d.lock.Lock()
defer d.lock.Unlock()
if !d.running {
return
}
// stop handing out slots and wake every goroutine blocked in Acquire
d.running = false
d.cond.Broadcast()
}
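The distributor added here is essentially a counting semaphore built on a mutex and condition variable: `Acquire` blocks while no slots are free and may grant fewer slots than requested, `Release` returns them, and `Close` wakes every waiter. A minimal usage sketch (not part of the commit), using the import path that the test file below uses:

```go
package main

import (
	"fmt"
	"sync"

	distri "git.realxlfd.cc/RealXLFD/golib/proc/thread/distributor"
)

func main() {
	// A budget of 4 worker slots shared by all jobs.
	d := distri.New(4).Run()
	defer d.Close()

	var wg sync.WaitGroup
	for job := 0; job < 8; job++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Blocks until at least one slot is free; may grant fewer than requested.
			got := d.Acquire(2)
			defer d.Release(got)
			fmt.Printf("job %d runs with %d slot(s)\n", id, got)
		}(job)
	}
	wg.Wait()
}
```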

108
proc/thread/distributor/distributor_test.go

@ -0,0 +1,108 @@
package distri_test
import (
"sync"
"testing"
"time"
distri "git.realxlfd.cc/RealXLFD/golib/proc/thread/distributor"
"github.com/stretchr/testify/assert"
)
func TestDistributor_AcquireAndRelease(t *testing.T) {
maxThreads := 10
d := distri.New(maxThreads).Run()
// Acquire less than max
acquired := d.Acquire(5)
assert.Equal(t, 5, acquired, "Should acquire 5 threads")
// Release and then acquire again
d.Release(5)
acquired = d.Acquire(5)
assert.Equal(
t,
5,
acquired,
"Should re-acquire 5 threads after release",
)
// Try to acquire more than available
acquired = d.Acquire(10)
assert.Equal(
t,
5,
acquired,
"Should only acquire 5 threads because that's what's left",
)
d.Release(5)
}
func TestDistributor_Close(t *testing.T) {
d := distri.New(10).Run()
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
acquired := d.Acquire(1)
assert.Equal(
t,
1,
acquired,
"Should acquire 0 threads after close",
)
}()
time.Sleep(time.Millisecond * 100) // Ensuring goroutine calls Acquire before Close
d.Close()
wg.Wait()
}
func TestDistributor_Concurrency(t *testing.T) {
d := distri.New(100).Run()
var wg sync.WaitGroup
acquireAndRelease := func(count int) {
defer wg.Done()
acquired := d.Acquire(count)
assert.Equal(
t,
count,
acquired,
"Should acquire the exact count of threads",
)
time.Sleep(time.Millisecond * 10) // Simulate work
d.Release(count)
}
totalThreads := 100
for i := 0; i < 10; i++ {
wg.Add(1)
go acquireAndRelease(totalThreads / 10)
}
wg.Wait()
// Check if all threads are released properly
acquired := d.Acquire(totalThreads)
assert.Equal(
t,
totalThreads,
acquired,
"Should be able to acquire all threads after concurrent operations",
)
d.Release(totalThreads)
}
func BenchmarkDistributor_AcquireRelease(b *testing.B) {
d := distri.New(100).Run()
b.ResetTimer()
for i := 0; i < b.N; i++ {
d.Acquire(1)
d.Release(1)
}
}

114
proc/thread/pool/closure_pool.go

@ -0,0 +1,114 @@
package pool
import (
"sync"
"sync/atomic"
)
// ClosurePool is a thread pool that runs arbitrary closures
type ClosurePool struct {
group sync.WaitGroup // tracks in-flight tasks so Join can block until they finish
current atomic.Int32 // number of tasks currently executing
state State // pool state
max int // maximum degree of parallelism
queue chan func() // buffer that lets tasks be queued before the pool starts
exit chan bool
}
// NewClosure creates a closure-based thread pool
func NewClosure(max int, cache int) *ClosurePool {
if max <= 0 || cache < 0 {
panic("the pool's maximum parallelism must be a positive integer")
}
return &ClosurePool{
group: sync.WaitGroup{},
current: atomic.Int32{},
state: Ready,
max: max,
queue: make(chan func(), cache),
exit: make(chan bool),
}
}
// State returns the pool's current state
func (p *ClosurePool) State() State {
return p.state
}
// Max returns the pool's maximum parallelism
func (p *ClosurePool) Max() int {
return p.max
}
// Current returns the number of tasks currently executing
func (p *ClosurePool) Current() int {
return int(p.current.Load())
}
// Run starts the pool and begins executing tasks
func (p *ClosurePool) Run() *ClosurePool {
// guard against repeated calls
if p.state == Running {
return p
}
p.state = Running
// start the workers
for i := 0; i < p.max; i++ {
go p.worker()
}
return p
}
func (p *ClosurePool) worker() {
for p.state != Stopped {
task, ok := <-p.queue
// the queue was closed: force the worker to exit
if !ok {
break
}
p.current.Add(1)
// run the task
task()
p.current.Add(-1)
p.group.Done()
}
}
// Join stops accepting new tasks and waits for the remaining ones to finish
func (p *ClosurePool) Join() {
// guard against repeated calls
if p.state == Stopping || p.state == Stopped {
return
}
close(p.queue)
p.state = Stopping
p.group.Wait()
p.state = Stopped
}
// Abort interrupts the pool immediately
func (p *ClosurePool) Abort() {
// guard against repeated calls
if p.state == Stopped {
return
}
p.state = Stopped
close(p.queue)
}
// Push submits a task to the pool
func (p *ClosurePool) Push(f func()) {
if p.state == Stopped || p.state == Stopping {
panic("pushed a task to a pool that has already been closed")
}
p.add(f)
return
}
func (p *ClosurePool) Queued() int {
return len(p.queue)
}
// add registers the task with the wait group and enqueues it
func (p *ClosurePool) add(f func()) {
p.group.Add(1)
p.queue <- f
}
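`ClosurePool` lets callers queue closures before the workers start, which is what the benchmark further below relies on. A small usage sketch (not part of the commit), assuming the `git.realxlfd.cc/RealXLFD/golib` module path seen in the other test imports:

```go
package main

import (
	"fmt"

	"git.realxlfd.cc/RealXLFD/golib/proc/thread/pool"
)

func main() {
	// Up to 4 workers, with room for 16 queued closures before Run is called.
	p := pool.NewClosure(4, 16)

	results := make(chan int, 16)
	for i := 1; i <= 10; i++ {
		n := i
		p.Push(func() { results <- n * n })
	}

	p.Run()  // start the workers
	p.Join() // stop accepting tasks and wait for the queued ones to finish
	close(results)

	for r := range results {
		fmt.Println(r)
	}
}
```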

78
proc/thread/pool/pool.go

@ -3,57 +3,54 @@ package pool
import (
"sync"
"sync/atomic"
"time"
"git.realxlfd.cc/RealXLFD/golib/utils/containers/queue"
)
// Pool is a thread pool
type Pool struct {
group *sync.WaitGroup // tracks in-flight tasks so Join can block until they finish
current atomic.Int32 // number of tasks currently executing
state State // pool state
max int // maximum degree of parallelism
InQueue *queue.SyncQueue[any]
OutQueue *queue.SyncQueue[any]
Task func(any) any
exit chan bool
type Pool[I any, O any] struct {
group sync.WaitGroup // tracks in-flight tasks so Join can block until they finish
current atomic.Int32 // number of tasks currently executing
state State // pool state
max int // maximum degree of parallelism
inQueue *queue.SyncQueue[I]
OutQueue *queue.SyncQueue[O]
Task func(I) O
}
// New creates a thread pool
func New(max int) *Pool {
func New[I any, O any](max int, task func(I) O) *Pool[I, O] {
if max <= 0 {
panic("the pool's maximum parallelism must be a positive integer")
}
return &Pool{
&sync.WaitGroup{},
return &Pool[I, O]{
sync.WaitGroup{},
atomic.Int32{},
Ready,
max,
queue.NewSync[any](),
queue.NewSync[any](),
func(any) any { return nil },
make(chan bool),
queue.NewSync[I](),
queue.NewSync[O](),
task,
}
}
// State returns the pool's current state
func (p *Pool) State() State {
func (p *Pool[I, O]) State() State {
return p.state
}
// Max returns the pool's maximum parallelism
func (p *Pool) Max() int {
func (p *Pool[I, O]) Max() int {
return p.max
}
// Current returns the number of tasks currently executing
func (p *Pool) Current() int {
func (p *Pool[I, O]) Current() int {
return int(p.current.Load())
}
// Run starts the pool and begins executing tasks
func (p *Pool) Run() *Pool {
func (p *Pool[I, O]) Run() *Pool[I, O] {
// guard against repeated calls
if p.state == Running {
return p
@ -66,9 +63,9 @@ func (p *Pool) Run() *Pool {
return p
}
func (p *Pool) worker() {
func (p *Pool[I, O]) worker() {
for p.state != Stopped {
input, exit := p.InQueue.SyncPop()
input, exit := p.inQueue.SyncPop()
// the queue was closed: force the worker to exit
if exit {
break
@ -82,53 +79,54 @@ func (p *Pool) worker() {
}
// Join stops accepting new tasks and waits for the remaining ones to finish
func (p *Pool) Join() {
func (p *Pool[I, O]) Join() {
// guard against repeated calls
if p.state == Stopping || p.state == Stopped {
return
}
p.InQueue.Exit()
p.state = Stopping
p.group.Wait()
for {
if p.InQueue.Size() != 0 {
time.Sleep(100 * time.Millisecond)
} else {
break
}
}
p.group.Wait()
p.inQueue.Close()
p.state = Stopped
}
// Abort interrupts the pool immediately
func (p *Pool) Abort() {
func (p *Pool[I, O]) Abort() {
// guard against repeated calls
if p.state == Stopped {
return
}
p.state = Stopped
p.InQueue.Exit()
p.inQueue.Close()
}
// Push submits tasks to the pool
func (p *Pool) Push(input any) {
func (p *Pool[I, O]) Push(input ...I) {
if p.state == Stopped || p.state == Stopping {
panic("sent a task on a thread that has already been closed")
panic("pushed a task to a pool that has already been closed")
}
p.group.Add(1)
p.InQueue.Push(input)
p.inQueue.Push(input...)
}
func (p *Pool) GetResult() any {
// GetResult returns one result (blocks until a result is available)
func (p *Pool[I, O]) GetResult() any {
result, _ := p.OutQueue.SyncPop()
return result
}
func (p *Pool) GetAllResult() (result []any) {
// GetAllResult returns all results; it automatically stops the pool
func (p *Pool[I, O]) GetAllResult() (result []any) {
p.Join()
for p.OutQueue.Size() != 0 {
result = append(result, p.OutQueue.Pop())
}
return result
}
func (p *Pool[I, O]) GetOutQueue() *queue.SyncQueue[O] {
return p.OutQueue
}
func (p *Pool[I, O]) Queued() int {
return p.inQueue.Size()
}
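The rewritten pool is generic over the task's input and output types and moves both through `SyncQueue` instances. A short usage sketch with concrete type parameters (not part of the commit), again assuming the `golib` module path:

```go
package main

import (
	"fmt"

	"git.realxlfd.cc/RealXLFD/golib/proc/thread/pool"
)

func main() {
	// A pool of string-length workers: I is inferred as string, O as int.
	p := pool.New(4, func(s string) int { return len(s) })

	p.Push("alpha", "beta", "gamma") // variadic Push fills the input queue
	p.Run()

	// GetAllResult joins the pool and drains the output queue.
	for _, r := range p.GetAllResult() {
		fmt.Println(r)
	}
}
```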

66
proc/thread/pool/pool_test.go

@ -1,23 +1,26 @@
package pool
import (
"runtime"
"testing"
"github.com/rs/xid"
"github.com/stretchr/testify/assert"
)
func TestPool(t *testing.T) {
maxWorkers := 5
pool := New(maxWorkers)
pool := New(
maxWorkers, func(input any) any {
n := input.(int)
return n * n
},
)
assert.Equal(t, Ready, pool.State(), "the pool should start in the Ready state")
assert.Equal(t, 0, pool.Current(), "no goroutines should be running right after initialization")
// set the task: a simple arithmetic operation
pool.Task = func(input any) any {
n := input.(int)
return n * n
}
// start the pool
pool.Run()
@ -33,3 +36,56 @@ func TestPool(t *testing.T) {
// check that OutQueue holds 10 results
assert.Equal(t, 10, pool.OutQueue.Size(), "there should be 10 processed results")
}
func mission(i []byte) [][]byte {
_ = i
return [][]byte{
[]byte(xid.New().String()), []byte(xid.New().String()),
[]byte(xid.New().String()),
}
}
func BenchmarkPool(b *testing.B) {
maxWorkers := runtime.NumCPU()
pool := New(
maxWorkers, mission,
)
for range b.N {
id := xid.New().String()
pool.Push(
[]byte(id),
)
}
b.ResetTimer()
pool.Run()
pool.Join()
}
func BenchmarkClosurePoolWithNoCache(b *testing.B) {
maxWorkers := runtime.NumCPU()
pool := NewClosure(maxWorkers, b.N+1)
result := make(chan [][]byte)
exit := false
go func() {
for !exit {
_ = <-result
}
}()
for range b.N {
id := xid.New().String()
buf := []byte(id)
pool.Push(
func() {
_ = buf
result <- [][]byte{
[]byte(xid.New().String()), []byte(xid.New().String()),
[]byte(xid.New().String()),
}
},
)
}
b.ResetTimer()
pool.Run()
pool.Join()
close(result)
exit = true
}

145
proc/threadpool/pool.go

@ -1,145 +0,0 @@
package threadpool
import (
"sync"
"sync/atomic"
"time"
)
// Pool is a thread pool
type Pool struct {
group sync.WaitGroup // tracks in-flight tasks so Join can block until they finish
current int32 // number of tasks currently executing
state State // pool state
max int32 // maximum degree of parallelism
queueCurrent int32 // number of tasks in the buffer queue
queue chan func() // buffer that lets tasks be queued before the pool starts
exit chan bool
}
// State identifies the state of the thread pool
type State int
const (
Ready = iota // ready: the phase before the pool starts
Running // running
Stopping // stopping: waiting for the remaining tasks to finish
Stopped // stopped: further execution is forcibly suspended; new pushes are reloaded into the buffer
)
// New creates a thread pool
func New(max int, cache int) *Pool {
if max < 0 || cache < 0 {
panic("the pool's maximum parallelism must be a positive integer")
}
return &Pool{
group: sync.WaitGroup{},
current: 0,
state: Ready,
max: int32(max),
queueCurrent: 0,
queue: make(
chan func(),
cache,
),
exit: make(chan bool),
}
}
// State returns the pool's current state
func (p *Pool) State() State {
return p.state
}
// Max returns the pool's maximum parallelism
func (p *Pool) Max() int {
return int(p.max)
}
// Current returns the number of tasks currently executing
func (p *Pool) Current() int {
return int(p.current)
}
// Run starts the pool and begins executing tasks
func (p *Pool) Run() *Pool {
// guard against repeated calls
if p.state == Running {
return p
}
p.state = Running
var worker = func() {
for p.state != Stopped {
task, ok := <-p.queue
// the queue was closed: force the worker to exit
if !ok {
break
}
atomic.AddInt32(
&p.current,
1,
)
// run the task
task()
atomic.AddInt32(
&p.current,
-1,
)
p.group.Done()
}
}
// start the workers
for i := 0; i < int(p.max); i++ {
go worker()
}
return p
}
// Join stops accepting new tasks and waits for the remaining ones to finish
func (p *Pool) Join() {
// guard against repeated calls
if p.state == Stopping || p.state == Stopped {
return
}
close(p.queue)
p.state = Stopping
for {
if len(p.queue) != 0 {
time.Sleep(120 * time.Millisecond)
} else {
break
}
}
p.group.Wait()
p.state = Stopped
}
// Abort interrupts the pool immediately
func (p *Pool) Abort() {
// guard against repeated calls
if p.state == Stopped {
return
}
p.state = Stopped
close(p.queue)
}
// Push submits a task to the pool
func (p *Pool) Push(f func()) {
if p.state == Stopped || p.state == Stopping {
panic("sent a task on a thread that has already been closed")
}
p.add(f)
return
}
// Private
// add is the concrete implementation for enqueuing a task
func (p *Pool) add(f func()) {
p.queue <- f
p.group.Add(1)
atomic.AddInt32(
&p.queueCurrent,
1,
)
}

2
utils/containers/queue/queue.go

@ -38,8 +38,6 @@ func (q *Queue[T]) Head() T {
return q.buf[q.head]
}
// 0 1 2 3
// 1 2 3
func (q *Queue[T]) relocate() {
q.endure = len(q.buf) / 2
newData := make([]T, len(q.buf)-q.head)

7
utils/containers/queue/sync_queue.go

@ -27,14 +27,15 @@ func (s *SyncQueue[T]) Size() int {
func (s *SyncQueue[T]) Push(elems ...T) {
s.lock.Lock()
defer s.lock.Unlock()
s.queue.Push(elems...)
s.cond.Signal()
s.lock.Unlock()
}
func (s *SyncQueue[T]) Exit() {
func (s *SyncQueue[T]) Close() {
s.lock.Lock()
defer s.lock.Unlock()
s.cond.Broadcast()
s.lock.Unlock()
}
func (s *SyncQueue[T]) SyncPop() (T, bool) {
s.lock.Lock()
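This hunk renames `Exit` to `Close` and lets `Push` release the lock through `defer`. A producer/consumer sketch (not part of the commit), assuming `SyncPop` blocks until an element is available and reports a closed queue through its second return value, which is how the pool worker above treats it:

```go
package main

import (
	"fmt"

	"git.realxlfd.cc/RealXLFD/golib/utils/containers/queue"
)

func main() {
	q := queue.NewSync[string]()

	// Producer pushes three elements in one call.
	go func() {
		q.Push("a", "b", "c")
	}()

	// Consumer: SyncPop blocks until an element is available.
	for i := 0; i < 3; i++ {
		v, closed := q.SyncPop()
		if closed { // the queue was closed before an element arrived
			break
		}
		fmt.Println(v)
	}
	q.Close()
}
```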

41
utils/containers/queue/sync_queue_test.go

@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/rs/xid"
"github.com/stretchr/testify/assert"
)
@ -48,26 +49,30 @@ func TestSyncQueueConcurrent(t *testing.T) {
wg.Wait()
}
func BenchmarkChannel(b *testing.B) {
q := make(chan string, 100)
go func() {
func BenchmarkSyncQueuePushPop(b *testing.B) {
q := NewSync[int]()
wg := sync.WaitGroup{}
wg.Add(2)
b.RunParallel(
func(pb *testing.PB) {
for pb.Next() {
wg.Add(1)
go func() {
defer wg.Done()
q.Push(1)
_ = q.Pop()
}()
}
},
)
for range b.N {
q <- xid.New().String()
}
}()
for range b.N {
_ = <-q
}
}
wg.Wait()
func BenchmarkSyncQueuePushPop(b *testing.B) {
q := NewSync[string]()
go func() {
for range b.N {
q.Push(xid.New().String())
}
}()
for range b.N {
q.SyncPop()
}
q.Close()
}
func BenchmarkSyncQueueSyncPop(b *testing.B) {

8
utils/str/index.go

@ -26,8 +26,8 @@ func TrimEnd(str string, maxLength int) string {
}
func Join(ss ...string) string {
builder := strings.Builder{}
for _, s := range ss {
builder.WriteString(s)
for index := range ss {
builder.WriteString(ss[index])
}
return builder.String()
}
@ -36,8 +36,8 @@ func Each(
ss []string, f func(index int, s string) (string, bool),
) []string {
var result []string
for index, s := range ss {
if s, ok := f(index, s); ok {
for index := range ss {
if s, ok := f(index, ss[index]); ok {
result = append(result, s)
}
}
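The loops in `Join` and `Each` now index into the slice instead of ranging over copies. A quick sketch of what the two helpers do (not part of the commit), assuming the `golib/utils/str` import path from the old test file:

```go
package main

import (
	"fmt"
	"strings"

	"git.realxlfd.cc/RealXLFD/golib/utils/str"
)

func main() {
	// Join concatenates without a separator.
	fmt.Println(str.Join("a", "b", "c")) // abc

	// Each maps and filters in one pass: keep non-empty entries, upper-cased.
	got := str.Each(
		[]string{"go", "", "lib"},
		func(index int, s string) (string, bool) {
			return strings.ToUpper(s), s != ""
		},
	)
	fmt.Println(got) // [GO LIB]
}
```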

13
test/str_test.go → utils/str/str_test.go

@ -1,15 +1,14 @@
package test
package str
import (
"testing"
"git.realxlfd.cc/RealXLFD/golib/utils/str"
"github.com/stretchr/testify/assert"
)
func TestNewFilter(t *testing.T) {
preclude := []string{"test1", "test2"}
filter := str.NewFilter(preclude...)
filter := NewFilter(preclude...)
assert.True(t, filter.Contains("test1"))
assert.True(t, filter.Contains("test2"))
@ -18,7 +17,7 @@ func TestNewFilter(t *testing.T) {
func TestContains(t *testing.T) {
preclude := []string{"test1", "test2"}
filter := str.NewFilter(preclude...)
filter := NewFilter(preclude...)
assert.True(t, filter.Contains("test1"))
assert.False(t, filter.Contains("test3"))
@ -26,7 +25,7 @@ func TestContains(t *testing.T) {
func TestGetCount(t *testing.T) {
preclude := []string{"test1", "test2"}
filter := str.NewFilter(preclude...)
filter := NewFilter(preclude...)
filter.Count("test1")
filter.Count("test1")
@ -37,7 +36,7 @@ func TestGetCount(t *testing.T) {
func TestCount(t *testing.T) {
preclude := []string{"test1", "test2"}
filter := str.NewFilter(preclude...)
filter := NewFilter(preclude...)
filter.Count("test1")
filter.Count("test1")
@ -49,7 +48,7 @@ func TestCount(t *testing.T) {
func TestCountWithNonExistentKey(t *testing.T) {
preclude := []string{"test1", "test2"}
filter := str.NewFilter(preclude...)
filter := NewFilter(preclude...)
filter.Count("test3")