...
 
......@@ -1463,6 +1463,13 @@ func getBucketLocationURL(endPoint, bucketName string) string {
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}
// return URL for set/get lifecycle of the bucket.
func getBucketLifecycleURL(endPoint, bucketName string) (ret string) {
queryValue := url.Values{}
queryValue.Set("lifecycle", "")
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}
// return URL for listing objects in the bucket with V1 legacy API.
func getListObjectsV1URL(endPoint, bucketName, prefix, maxKeys, encodingType string) string {
queryValue := url.Values{}
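For reference, a standalone sketch (outside the test suite, with a placeholder endpoint and bucket name) of the URL the new getBucketLifecycleURL helper is expected to produce: the bucket URL carrying an empty "lifecycle" query parameter.

```go
package main

import (
	"fmt"
	"net/url"
)

// bucketLifecycleURL mirrors the shape of the new test helper: a bucket URL
// with an empty "lifecycle" query parameter. Endpoint and bucket are placeholders.
func bucketLifecycleURL(endpoint, bucket string) string {
	v := url.Values{}
	v.Set("lifecycle", "")
	return endpoint + "/" + bucket + "?" + v.Encode()
}

func main() {
	// Prints: http://127.0.0.1:9000/mybucket?lifecycle=
	fmt.Println(bucketLifecycleURL("http://127.0.0.1:9000", "mybucket"))
}
```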
......@@ -2070,6 +2077,12 @@ func registerBucketLevelFunc(bucket *mux.Router, api objectAPIHandlers, apiFunct
case "GetBucketPolicy":
// Register Get Bucket policy HTTP Handler.
bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
case "GetBucketLifecycle":
bucket.Methods("GET").HandlerFunc(api.GetBucketLifecycleHandler).Queries("lifecycle", "")
case "PutBucketLifecycle":
bucket.Methods("PUT").HandlerFunc(api.PutBucketLifecycleHandler).Queries("lifecycle", "")
case "DeleteBucketLifecycle":
bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketLifecycleHandler).Queries("lifecycle", "")
case "GetBucketLocation":
// Register GetBucketLocation handler.
bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "")
......
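The three new cases above wire the lifecycle handlers onto the test router with gorilla/mux query matchers. As a rough standalone illustration of that matcher pattern (the handler body, response, and address below are made up, not MinIO's), a route registered with Queries("lifecycle", "") only fires when the request carries the lifecycle query parameter:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	bucket := r.PathPrefix("/{bucket}").Subrouter()

	// Fires only for GET /<bucket>?lifecycle= ; a plain GET /<bucket> does not match.
	bucket.Methods("GET").HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "<LifecycleConfiguration/>")
	}).Queries("lifecycle", "")

	log.Fatal(http.ListenAndServe(":8080", r))
}
```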
......@@ -108,23 +108,52 @@ type xlSets struct {
mrfUploads map[string]int
}
// isConnected - checks if the endpoint is connected or not.
func (s *xlSets) isConnected(endpointStr string) bool {
func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool {
disk := diskMap[endpoint]
if disk == nil {
return false
}
return disk.IsOnline()
}
func (s *xlSets) getOnlineDisksCount() int {
s.xlDisksMu.RLock()
defer s.xlDisksMu.RUnlock()
count := 0
for i := 0; i < s.setCount; i++ {
for j := 0; j < s.drivesPerSet; j++ {
disk := s.xlDisks[i][j]
if disk == nil {
continue
}
if !disk.IsOnline() {
continue
}
count++
}
}
return count
}
func (s *xlSets) getDiskMap() map[string]StorageAPI {
diskMap := make(map[string]StorageAPI)
s.xlDisksMu.RLock()
defer s.xlDisksMu.RUnlock()
for i := 0; i < s.setCount; i++ {
for j := 0; j < s.drivesPerSet; j++ {
if s.xlDisks[i][j] == nil {
disk := s.xlDisks[i][j]
if disk == nil {
continue
}
if s.xlDisks[i][j].String() != endpointStr {
if !disk.IsOnline() {
continue
}
return s.xlDisks[i][j].IsOnline()
diskMap[disk.String()] = disk
}
}
return false
return diskMap
}
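The refactor replaces the per-endpoint isConnected scan with a snapshot map keyed by endpoint string. A minimal self-contained sketch of that lookup pattern, using a fake StorageAPI stand-in (the interface and fields below are assumptions for illustration only):

```go
package main

import "fmt"

// StorageAPI and fakeDisk are stand-ins for MinIO's storage interface, reduced
// to the two methods the lookup needs.
type StorageAPI interface {
	String() string
	IsOnline() bool
}

type fakeDisk struct {
	endpoint string
	online   bool
}

func (d fakeDisk) String() string { return d.endpoint }
func (d fakeDisk) IsOnline() bool { return d.online }

// isEndpointConnected has the same shape as the new helper in the diff:
// a missing (nil) entry means the endpoint was never connected; otherwise
// report the disk's live status.
func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool {
	disk := diskMap[endpoint]
	if disk == nil {
		return false
	}
	return disk.IsOnline()
}

func main() {
	diskMap := map[string]StorageAPI{
		"http://node1/disk1": fakeDisk{"http://node1/disk1", true},
		"http://node2/disk1": fakeDisk{"http://node2/disk1", false},
	}
	fmt.Println(isEndpointConnected(diskMap, "http://node1/disk1")) // true
	fmt.Println(isEndpointConnected(diskMap, "http://node2/disk1")) // false
	fmt.Println(isEndpointConnected(diskMap, "http://node3/disk1")) // false: not in the map
}
```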
// Initializes a new StorageAPI from the endpoint argument, returns
......@@ -172,30 +201,11 @@ func findDiskIndex(refFormat, format *formatXLV3) (int, int, error) {
// connectDisksWithQuorum is the same as connectDisks but waits
// for a quorum of formatted disks to be online in any given set.
func (s *xlSets) connectDisksWithQuorum() {
var onlineDisks int
for onlineDisks < len(s.endpoints)/2 {
for i, endpoint := range s.endpoints {
if s.isConnected(s.endpointStrings[i]) {
continue
}
disk, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err)
continue
}
i, j, err := findDiskIndex(s.format, format)
if err != nil {
// Close the internal connection to avoid connection leaks.
disk.Close()
printEndpointError(endpoint, err)
continue
}
disk.SetDiskID(format.XL.This)
s.xlDisks[i][j] = disk
onlineDisks++
for {
s.connectDisks()
if s.getOnlineDisksCount() > len(s.endpoints)/2 {
return
}
// Sleep for a while so that we don't spin at 100% CPU
// while waiting for more than half the disks to come online.
time.Sleep(100 * time.Millisecond)
}
}
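connectDisksWithQuorum now simply retries connectDisks and backs off between rounds until more than half of the endpoints report online. A standalone sketch of that retry-until-quorum loop, with stand-in callbacks in place of s.connectDisks and s.getOnlineDisksCount:

```go
package main

import (
	"fmt"
	"time"
)

// waitForQuorum retries until more than half of the endpoints are online,
// sleeping briefly between rounds so the loop does not spin at 100% CPU.
func waitForQuorum(totalEndpoints int, connectAll func(), onlineCount func() int) {
	for {
		connectAll()
		if onlineCount() > totalEndpoints/2 {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	online := 0
	// Pretend every connection round brings two more disks online.
	connect := func() { online += 2 }
	waitForQuorum(8, connect, func() int { return online })
	fmt.Println("quorum reached with", online, "disks online")
}
```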
......@@ -203,33 +213,41 @@ func (s *xlSets) connectDisksWithQuorum() {
// connectDisks - attempt to connect all the endpoints, loads format
// and re-arranges the disks in proper position.
func (s *xlSets) connectDisks() {
var wg sync.WaitGroup
diskMap := s.getDiskMap()
for i, endpoint := range s.endpoints {
if s.isConnected(s.endpointStrings[i]) {
continue
}
disk, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err)
continue
}
setIndex, diskIndex, err := findDiskIndex(s.format, format)
if err != nil {
// Close the internal connection to avoid connection leaks.
disk.Close()
printEndpointError(endpoint, err)
if isEndpointConnected(diskMap, s.endpointStrings[i]) {
continue
}
disk.SetDiskID(format.XL.This)
s.xlDisksMu.Lock()
s.xlDisks[setIndex][diskIndex] = disk
s.xlDisksMu.Unlock()
// Send a new disk connect event with a timeout
select {
case s.disksConnectEvent <- diskConnectInfo{setIndex: setIndex}:
case <-time.After(100 * time.Millisecond):
}
wg.Add(1)
go func(endpoint Endpoint) {
defer wg.Done()
disk, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err)
return
}
setIndex, diskIndex, err := findDiskIndex(s.format, format)
if err != nil {
// Close the internal connection to avoid connection leaks.
disk.Close()
printEndpointError(endpoint, err)
return
}
disk.SetDiskID(format.XL.This)
s.xlDisksMu.Lock()
s.xlDisks[setIndex][diskIndex] = disk
s.xlDisksMu.Unlock()
go func(setIndex int) {
// Send a new disk connect event with a timeout
select {
case s.disksConnectEvent <- diskConnectInfo{setIndex: setIndex}:
case <-time.After(100 * time.Millisecond):
}
}(setIndex)
}(endpoint)
}
wg.Wait()
}
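The rewritten connectDisks dials every unconnected endpoint in its own goroutine under a sync.WaitGroup and publishes the connect event with a bounded, non-blocking send. A self-contained sketch of those two patterns (the endpoints, channel payload, and the dialing step are placeholders):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// connectAll fans out one goroutine per endpoint under a WaitGroup and notifies
// listeners through a send that gives up after a short timeout instead of
// blocking the connect path.
func connectAll(endpoints []string, events chan<- string) {
	var wg sync.WaitGroup
	for _, endpoint := range endpoints {
		wg.Add(1)
		go func(endpoint string) {
			defer wg.Done()
			// ... dial the endpoint, load its format, place it in the right set slot ...
			select {
			case events <- endpoint: // a listener picked up the connect event
			case <-time.After(100 * time.Millisecond): // nobody listening; don't block
			}
		}(endpoint)
	}
	wg.Wait()
}

func main() {
	events := make(chan string)
	done := make(chan struct{})
	go func() {
		for e := range events {
			fmt.Println("connected:", e)
		}
		close(done)
	}()
	connectAll([]string{"node1/disk1", "node2/disk1"}, events)
	close(events)
	<-done
}
```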
// monitorAndConnectEndpoints this is a monitoring loop to keep track of disconnected
......@@ -259,8 +277,8 @@ func (s *xlSets) GetLockers(setIndex int) func() []dsync.NetLocker {
// GetDisks returns a closure for a given set, which provides list of disks per set.
func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI {
return func() []StorageAPI {
s.xlDisksMu.Lock()
defer s.xlDisksMu.Unlock()
s.xlDisksMu.RLock()
defer s.xlDisksMu.RUnlock()
disks := make([]StorageAPI, s.drivesPerSet)
copy(disks, s.xlDisks[setIndex])
return disks
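GetDisks only copies the per-set slice, so the change swaps the exclusive lock for a read lock. A minimal sketch of that RWMutex usage (the type and method names below are illustrative, not MinIO's):

```go
package main

import (
	"fmt"
	"sync"
)

// diskSet is an illustrative type: readers that only copy the shared slice take
// the read lock, so many snapshot calls can run concurrently, while writers
// still take the exclusive lock.
type diskSet struct {
	mu    sync.RWMutex
	disks []string
}

func (s *diskSet) snapshot() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]string, len(s.disks))
	copy(out, s.disks)
	return out
}

func (s *diskSet) replace(i int, d string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.disks[i] = d
}

func main() {
	s := &diskSet{disks: []string{"disk1", "disk2"}}
	s.replace(1, "disk2-reconnected")
	fmt.Println(s.snapshot())
}
```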
......@@ -272,13 +290,14 @@ const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs
// Initialize new set of erasure coded sets.
func newXLSets(endpoints Endpoints, format *formatXLV3, setCount int, drivesPerSet int) (*xlSets, error) {
endpointStrings := make([]string, len(endpoints))
for _, endpoint := range endpoints {
for i, endpoint := range endpoints {
if endpoint.IsLocal {
endpointStrings = append(endpointStrings, endpoint.Path)
endpointStrings[i] = endpoint.Path
} else {
endpointStrings = append(endpointStrings, endpoint.String())
endpointStrings[i] = endpoint.String()
}
}
// Initialize the XL sets instance.
s := &xlSets{
sets: make([]*xlObjects, setCount),
......
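The newXLSets change above fixes how endpointStrings is filled: appending to a slice pre-sized with make left empty leading entries and doubled its length, breaking positional lookups, while assigning by index keeps endpointStrings[i] aligned with endpoints[i]. A small standalone demonstration of the difference (endpoint values are placeholders):

```go
package main

import "fmt"

func main() {
	endpoints := []string{"http://node1/disk1", "http://node2/disk1"}

	// Buggy variant: length becomes 4 and the first two entries are "".
	buggy := make([]string, len(endpoints))
	for _, e := range endpoints {
		buggy = append(buggy, e)
	}
	fmt.Println(len(buggy), buggy) // 4 [  http://node1/disk1 http://node2/disk1]

	// Fixed variant: index i of the strings matches index i of the endpoints.
	fixed := make([]string, len(endpoints))
	for i, e := range endpoints {
		fixed[i] = e
	}
	fmt.Println(len(fixed), fixed) // 2 [http://node1/disk1 http://node2/disk1]
}
```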
......@@ -24,12 +24,13 @@ import (
)
// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
func (xl xlObjects) getLoadBalancedDisks() (disks []StorageAPI) {
func (xl xlObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
disks := xl.getDisks()
// Return the disks re-ordered according to the random shuffle.
for _, i := range hashOrder(UTCNow().String(), len(xl.getDisks())) {
disks = append(disks, xl.getDisks()[i-1])
for _, i := range hashOrder(UTCNow().String(), len(disks)) {
newDisks = append(newDisks, disks[i-1])
}
return disks
return newDisks
}
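getLoadBalancedDisks now calls xl.getDisks() once and fills a separate newDisks slice in the shuffled order. As a rough, simplified stand-in for that pattern (the real hashOrder implementation differs; this only illustrates the 1-based order and the disks[i-1] indexing):

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// hashOrder here is a simplified stand-in: derive a starting point from a key,
// then visit the n slots as a rotation. The order is 1-based, which is why the
// caller indexes with disks[i-1].
func hashOrder(key string, n int) []int {
	start := int(crc32.ChecksumIEEE([]byte(key))%uint32(n)) + 1
	order := make([]int, n)
	for i := range order {
		order[i] = 1 + (start+i-1)%n
	}
	return order
}

func main() {
	disks := []string{"disk1", "disk2", "disk3", "disk4"}
	var shuffled []string
	for _, i := range hashOrder("2020-03-25T00:00:00Z", len(disks)) {
		shuffled = append(shuffled, disks[i-1])
	}
	fmt.Println(shuffled)
}
```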
// This function does the following check, suppose
......
......@@ -5,7 +5,7 @@ version: '3.7'
# 9001 through 9004.
services:
minio1:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
volumes:
- data1-1:/data1
- data1-2:/data2
......@@ -22,7 +22,7 @@ services:
retries: 3
minio2:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
volumes:
- data2-1:/data1
- data2-2:/data2
......@@ -39,7 +39,7 @@ services:
retries: 3
minio3:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
volumes:
- data3-1:/data1
- data3-2:/data2
......@@ -56,7 +56,7 @@ services:
retries: 3
minio4:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
volumes:
- data4-1:/data1
- data4-2:/data2
......
......@@ -2,7 +2,7 @@ version: '3.7'
services:
minio1:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
hostname: minio1
volumes:
- minio1-data:/export
......@@ -29,7 +29,7 @@ services:
retries: 3
minio2:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
hostname: minio2
volumes:
- minio2-data:/export
......@@ -56,7 +56,7 @@ services:
retries: 3
minio3:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
hostname: minio3
volumes:
- minio3-data:/export
......@@ -83,7 +83,7 @@ services:
retries: 3
minio4:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
hostname: minio4
volumes:
- minio4-data:/export
......
......@@ -2,7 +2,7 @@ version: '3.7'
services:
minio1:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
hostname: minio1
volumes:
- minio1-data:/export
......@@ -33,7 +33,7 @@ services:
retries: 3
minio2:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
hostname: minio2
volumes:
- minio2-data:/export
......@@ -64,7 +64,7 @@ services:
retries: 3
minio3:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
hostname: minio3
volumes:
- minio3-data:/export
......@@ -95,7 +95,7 @@ services:
retries: 3
minio4:
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
hostname: minio4
volumes:
- minio4-data:/export
......
......@@ -30,7 +30,7 @@ spec:
value: "minio"
- name: MINIO_SECRET_KEY
value: "minio123"
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
# Unfortunately you must manually define each server. Perhaps autodiscovery via DNS can be implemented in the future.
args:
- server
......
......@@ -22,7 +22,7 @@ spec:
value: "minio"
- name: MINIO_SECRET_KEY
value: "minio123"
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
args:
- server
- http://minio-{0...3}.minio.default.svc.cluster.local/data
......
......@@ -24,7 +24,7 @@ spec:
containers:
- name: minio
# Pulls the default Minio image from Docker Hub
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
args:
- gateway
- gcs
......
......@@ -32,7 +32,7 @@ spec:
- name: data
mountPath: "/data"
# Pulls the latest Minio image from Docker Hub
image: minio/minio:RELEASE.2020-03-19T21-49-00Z
image: minio/minio:RELEASE.2020-03-25T07-03-04Z
args:
- server
- /data
......
......@@ -22,17 +22,43 @@ import (
"github.com/minio/minio/pkg/bucket/object/tagging"
)
var (
errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified")
)
// Filter - a filter for a lifecycle configuration Rule.
type Filter struct {
XMLName xml.Name `xml:"Filter"`
Prefix string `xml:"Prefix,omitempty"`
And And `xml:"And,omitempty"`
Tag tagging.Tag `xml:"Tag,omitempty"`
XMLName xml.Name `xml:"Filter"`
Prefix string
And And
Tag tagging.Tag
}
var (
errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified")
)
// MarshalXML - produces the XML representation of the Filter struct;
// only one of Prefix, And, or Tag should be present in the output.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if err := e.EncodeToken(start); err != nil {
return err
}
switch {
case !f.And.isEmpty():
if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
return err
}
case !f.Tag.IsEmpty():
if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
return err
}
default:
// Always print Prefix field when both And & Tag are empty
if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
return err
}
}
return e.EncodeToken(xml.EndElement{Name: start.Name})
}
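The custom MarshalXML guarantees that exactly one of And, Tag, or Prefix is emitted, with Prefix written (even when empty) as the fallback. A trimmed-down standalone sketch of that behavior; the Tag and Filter types below are simplified stand-ins, not the real lifecycle package types:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

// Tag is a simplified stand-in for the lifecycle tag type.
type Tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

func (t Tag) IsEmpty() bool { return t.Key == "" }

// Filter is a reduced version of the struct above (no And) to show the encoding.
type Filter struct {
	XMLName xml.Name `xml:"Filter"`
	Prefix  string
	Tag     Tag
}

// MarshalXML emits the Tag element when set, otherwise falls back to Prefix,
// so exactly one child element appears in the output.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if err := e.EncodeToken(start); err != nil {
		return err
	}
	if !f.Tag.IsEmpty() {
		if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
			return err
		}
	} else {
		if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
			return err
		}
	}
	return e.EncodeToken(xml.EndElement{Name: start.Name})
}

func main() {
	enc := xml.NewEncoder(os.Stdout)
	enc.Indent("", "  ")
	// <Filter><Prefix>logs/</Prefix></Filter>
	enc.Encode(Filter{Prefix: "logs/"})
	fmt.Println()
	// <Filter><Tag><Key>env</Key><Value>dev</Value></Tag></Filter>
	enc.Encode(Filter{Tag: Tag{Key: "env", Value: "dev"}})
	fmt.Println()
}
```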
// Validate - validates the filter element
func (f Filter) Validate() error {
......