internode lockArgs should use messagepack (#13329)

it would seem that `bufio.Scan()` is very
slow under heavy concurrent I/O, i.e. when
r.Body is slow; instead, use a proper binary
exchange format to marshal and unmarshal the
LockArgs data structure in a cleaner way.
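
For illustration, a minimal sketch of the idea (not MinIO code): the PR's
`MarshalMsg`/`UnmarshalMsg` are generated by `github.com/tinylib/msgp` for
`dsync.LockArgs`; the sketch below instead uses a reflection-based MessagePack
library and a hypothetical `lockArgs` struct, only to show how one binary
payload replaces per-field query values plus a newline-delimited body.

```go
// Sketch only: a LockArgs-shaped struct round-tripped through MessagePack.
// The real code path uses msgp-generated MarshalMsg/UnmarshalMsg on
// dsync.LockArgs; the library and struct here are stand-ins.
package main

import (
	"fmt"

	"github.com/vmihailenco/msgpack/v5"
)

// lockArgs mirrors the rough shape of dsync.LockArgs for illustration only.
type lockArgs struct {
	UID       string
	Owner     string
	Source    string
	Quorum    int
	Resources []string
}

func main() {
	args := lockArgs{
		UID:       "c5fd2e9e",
		Owner:     "node-1",
		Source:    "object-handlers.go:402",
		Quorum:    3,
		Resources: []string{"bucket/object"},
	}

	// One compact binary payload carries every field; the receiver decodes
	// it in a single step instead of scanning the body line by line.
	payload, err := msgpack.Marshal(&args)
	if err != nil {
		panic(err)
	}

	var decoded lockArgs
	if err := msgpack.Unmarshal(payload, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes on the wire, decoded %+v\n", len(payload), decoded)
}
```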

this PR increases the performance of the
locking sub-system for tiny, repeated read-lock
requests on the same object.

```
BenchmarkLockArgs
BenchmarkLockArgs-4              6417609               185.7 ns/op            56 B/op          2 allocs/op
BenchmarkLockArgsOld
BenchmarkLockArgsOld-4           1187368              1015 ns/op            4096 B/op          1 allocs/op
```
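
A hypothetical skeleton of how such numbers can be reproduced in spirit (this
is not the benchmark added by the PR): the "old" variant joins resources with
newlines and scans them back with `bufio.Scanner`, whose default 4 KiB buffer
lines up with the 4096 B/op above, while the "new" variant round-trips a
`LockArgs`-shaped struct through a MessagePack stand-in. It belongs in a
`*_test.go` file and runs with `go test -bench LockArgs -benchmem`.

```go
package main

import (
	"bufio"
	"bytes"
	"testing"

	"github.com/vmihailenco/msgpack/v5"
)

// lockArgsSketch is a hypothetical stand-in for dsync.LockArgs.
type lockArgsSketch struct {
	UID       string
	Owner     string
	Source    string
	Quorum    int
	Resources []string
}

// BenchmarkLockArgsNewSketch: encode and decode the whole struct as MessagePack.
func BenchmarkLockArgsNewSketch(b *testing.B) {
	args := lockArgsSketch{UID: "c5fd2e9e", Owner: "node-1", Quorum: 3, Resources: []string{"bucket/object"}}
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		buf, err := msgpack.Marshal(&args)
		if err != nil {
			b.Fatal(err)
		}
		var out lockArgsSketch
		if err := msgpack.Unmarshal(buf, &out); err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkLockArgsOldSketch: newline-join the resources, then scan them back,
// roughly the shape of the path this PR removes.
func BenchmarkLockArgsOldSketch(b *testing.B) {
	resources := []string{"bucket/object"}
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		var buf bytes.Buffer
		for _, r := range resources {
			buf.WriteString(r)
			buf.WriteString("\n")
		}
		sc := bufio.NewScanner(&buf)
		var out []string
		for sc.Scan() {
			out = append(out, sc.Text())
		}
		if err := sc.Err(); err != nil {
			b.Fatal(err)
		}
	}
}
```
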
Author: Harshavardhana
Date: 2021-09-30 11:53:01 -07:00
Committed by: GitHub
Parent: d00ff3c453
Commit: ffd497673f

11 changed files with 545 additions and 83 deletions

@@ -22,7 +22,6 @@ import (
 	"context"
 	"io"
 	"net/url"
-	"strconv"

 	"github.com/minio/minio/internal/dsync"
 	"github.com/minio/minio/internal/http"
@@ -89,17 +88,13 @@ func (client *lockRESTClient) Close() error {
 // restCall makes a call to the lock REST server.
 func (client *lockRESTClient) restCall(ctx context.Context, call string, args dsync.LockArgs) (reply bool, err error) {
-	values := url.Values{}
-	values.Set(lockRESTUID, args.UID)
-	values.Set(lockRESTOwner, args.Owner)
-	values.Set(lockRESTSource, args.Source)
-	values.Set(lockRESTQuorum, strconv.Itoa(args.Quorum))
-	var buffer bytes.Buffer
-	for _, resource := range args.Resources {
-		buffer.WriteString(resource)
-		buffer.WriteString("\n")
-	}
-	respBody, err := client.callWithContext(ctx, call, values, &buffer, -1)
+	argsBytes, err := args.MarshalMsg(metaDataPoolGet()[:0])
+	if err != nil {
+		return false, err
+	}
+	defer metaDataPoolPut(argsBytes)
+	body := bytes.NewReader(argsBytes)
+	respBody, err := client.callWithContext(ctx, call, nil, body, body.Size())
 	defer http.DrainBody(respBody)
 	switch err {
 	case nil:
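
The hunk above covers the client's encode side. Below is a hypothetical sketch
of the matching decode on the receiving end (the corresponding handler change
lives elsewhere in this PR): `decodeLockArgs` is an invented helper, and a
reflection-based MessagePack library stands in for the generated
`UnmarshalMsg`. The point is that the handler reads the body once and
unmarshals the whole struct, instead of pulling fields from the query string
and scanning resources line by line.

```go
package main

import (
	"io"
	"net/http"

	"github.com/vmihailenco/msgpack/v5"
)

// lockArgs mirrors the rough shape of dsync.LockArgs for illustration only.
type lockArgs struct {
	UID       string
	Owner     string
	Source    string
	Quorum    int
	Resources []string
}

// decodeLockArgs is a hypothetical helper: read the request body once and
// unmarshal the whole argument struct; no bufio.Scanner, no url.Values.
func decodeLockArgs(r *http.Request) (lockArgs, error) {
	var args lockArgs
	// Cap the body defensively; lock requests are tiny.
	body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20))
	if err != nil {
		return args, err
	}
	return args, msgpack.Unmarshal(body, &args)
}
```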