Best Syzkaller code snippet using gcs.Read
gcs.go
Source: gcs.go
...
	gcs.Endpoint = options.Endpoint
	gcs.StorageClass = options.StorageClass
	gcs.PredefinedAcl = options.PredefinedACL
	if options.CredentialsFile != "" {
		b, err := os.ReadFile(options.CredentialsFile)
		if err != nil {
			return errors.Trace(err)
		}
		gcs.CredentialsBlob = string(b)
	}
	return nil
}

func defineGCSFlags(flags *pflag.FlagSet) {
	// TODO: remove experimental tag if it's stable
	flags.String(gcsEndpointOption, "", "(experimental) Set the GCS endpoint URL")
	flags.String(gcsStorageClassOption, "", "(experimental) Specify the GCS storage class for objects")
	flags.String(gcsPredefinedACL, "", "(experimental) Specify the GCS predefined acl for objects")
	flags.String(gcsCredentialsFile, "", "(experimental) Set the GCS credentials file path")
}

func hiddenGCSFlags(flags *pflag.FlagSet) {
	_ = flags.MarkHidden(gcsEndpointOption)
	_ = flags.MarkHidden(gcsStorageClassOption)
	_ = flags.MarkHidden(gcsPredefinedACL)
	_ = flags.MarkHidden(gcsCredentialsFile)
}

func (options *GCSBackendOptions) parseFromFlags(flags *pflag.FlagSet) error {
	var err error
	options.Endpoint, err = flags.GetString(gcsEndpointOption)
	if err != nil {
		return errors.Trace(err)
	}
	options.StorageClass, err = flags.GetString(gcsStorageClassOption)
	if err != nil {
		return errors.Trace(err)
	}
	options.PredefinedACL, err = flags.GetString(gcsPredefinedACL)
	if err != nil {
		return errors.Trace(err)
	}
	options.CredentialsFile, err = flags.GetString(gcsCredentialsFile)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}

type gcsStorage struct {
	gcs    *backuppb.GCS
	bucket *storage.BucketHandle
}

// DeleteFile deletes the file in storage.
func (s *gcsStorage) DeleteFile(ctx context.Context, name string) error {
	object := s.objectName(name)
	err := s.bucket.Object(object).Delete(ctx)
	return errors.Trace(err)
}

func (s *gcsStorage) objectName(name string) string {
	return path.Join(s.gcs.Prefix, name)
}

// WriteFile writes data to a file in storage.
func (s *gcsStorage) WriteFile(ctx context.Context, name string, data []byte) error {
	object := s.objectName(name)
	wc := s.bucket.Object(object).NewWriter(ctx)
	wc.StorageClass = s.gcs.StorageClass
	wc.PredefinedACL = s.gcs.PredefinedAcl
	_, err := wc.Write(data)
	if err != nil {
		return errors.Trace(err)
	}
	return wc.Close()
}

// ReadFile reads the file from storage and returns its contents.
func (s *gcsStorage) ReadFile(ctx context.Context, name string) ([]byte, error) {
	object := s.objectName(name)
	rc, err := s.bucket.Object(object).NewReader(ctx)
	if err != nil {
		return nil, errors.Annotatef(err,
			"failed to read gcs file, file info: input.bucket='%s', input.key='%s'",
			s.gcs.Bucket, object)
	}
	defer rc.Close()

	size := rc.Attrs.Size
	var b []byte
	if size < 0 {
		// happens when using fake-gcs-server in integration tests
		b, err = io.ReadAll(rc)
	} else {
		b = make([]byte, size)
		_, err = io.ReadFull(rc, b)
	}
	return b, errors.Trace(err)
}

// FileExists returns true if the file exists.
func (s *gcsStorage) FileExists(ctx context.Context, name string) (bool, error) {
	object := s.objectName(name)
	_, err := s.bucket.Object(object).Attrs(ctx)
	if err != nil {
		if errors.Cause(err) == storage.ErrObjectNotExist { // nolint:errorlint
			return false, nil
		}
		return false, errors.Trace(err)
	}
	return true, nil
}

// Open opens a Reader by file path.
func (s *gcsStorage) Open(ctx context.Context, path string) (ExternalFileReader, error) {
	object := s.objectName(path)
	handle := s.bucket.Object(object)
	rc, err := handle.NewRangeReader(ctx, 0, -1)
	if err != nil {
		return nil, errors.Annotatef(err,
			"failed to read gcs file, file info: input.bucket='%s', input.key='%s'",
			s.gcs.Bucket, path)
	}
	return &gcsObjectReader{
		storage:   s,
		name:      path,
		objHandle: handle,
		reader:    rc,
		ctx:       ctx,
	}, nil
}

// WalkDir traverses all the files in a dir.
//
// fn is the function called for each regular file visited by WalkDir.
// The first argument is the file path, which can be passed to `Open`;
// the second argument is the size in bytes of the file determined by path.
func (s *gcsStorage) WalkDir(ctx context.Context, opt *WalkOption, fn func(string, int64) error) error {
	if opt == nil {
		opt = &WalkOption{}
	}
	if len(opt.ObjPrefix) != 0 {
		return errors.New("gcs storage not support ObjPrefix for now")
	}
	prefix := path.Join(s.gcs.Prefix, opt.SubDir)
	if len(prefix) > 0 && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	query := &storage.Query{Prefix: prefix}
	// we only need each object's name and size
	err := query.SetAttrSelection([]string{"Name", "Size"})
	if err != nil {
		return errors.Trace(err)
	}
	iter := s.bucket.Objects(ctx, query)
	for {
		attrs, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return errors.Trace(err)
		}
		// When walking a specific directory, the result includes storage.Prefix,
		// which cannot be reused directly by the other APIs (Open/Read),
		// so we TrimPrefix it off for the next Open/Read.
		path := strings.TrimPrefix(attrs.Name, s.gcs.Prefix)
		// trim the leading '/' so the returned path is consistent with local storage
		path = strings.TrimPrefix(path, "/")
		if err = fn(path, attrs.Size); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}

func (s *gcsStorage) URI() string {
	return "gcs://" + s.gcs.Bucket + "/" + s.gcs.Prefix
}

// Create implements the ExternalStorage interface.
func (s *gcsStorage) Create(ctx context.Context, name string) (ExternalFileWriter, error) {
	object := s.objectName(name)
	wc := s.bucket.Object(object).NewWriter(ctx)
	wc.StorageClass = s.gcs.StorageClass
	wc.PredefinedACL = s.gcs.PredefinedAcl
	return newFlushStorageWriter(wc, &emptyFlusher{}, wc), nil
}

// Rename renames a file from oldFileName to newFileName.
func (s *gcsStorage) Rename(ctx context.Context, oldFileName, newFileName string) error {
	data, err := s.ReadFile(ctx, oldFileName)
	if err != nil {
		return errors.Trace(err)
	}
	err = s.WriteFile(ctx, newFileName, data)
	if err != nil {
		return errors.Trace(err)
	}
	return s.DeleteFile(ctx, oldFileName)
}

func newGCSStorage(ctx context.Context, gcs *backuppb.GCS, opts *ExternalStorageOptions) (*gcsStorage, error) {
	var clientOps []option.ClientOption
	if opts.NoCredentials {
		clientOps = append(clientOps, option.WithoutAuthentication())
	} else {
		if gcs.CredentialsBlob == "" {
			creds, err := google.FindDefaultCredentials(ctx, storage.ScopeReadWrite)
			if err != nil {
				return nil, errors.Annotatef(berrors.ErrStorageInvalidConfig, "%v Or you should provide '--gcs.credentials_file'", err)
			}
			if opts.SendCredentials {
				if len(creds.JSON) > 0 {
					gcs.CredentialsBlob = string(creds.JSON)
				} else {
					return nil, errors.Annotate(berrors.ErrStorageInvalidConfig,
						"You should provide '--gcs.credentials_file' when '--send-credentials-to-tikv' is true")
				}
			}
			if creds != nil {
				clientOps = append(clientOps, option.WithCredentials(creds))
			}
		} else {
			clientOps = append(clientOps, option.WithCredentialsJSON([]byte(gcs.GetCredentialsBlob())))
		}
	}

	if gcs.Endpoint != "" {
		clientOps = append(clientOps, option.WithEndpoint(gcs.Endpoint))
	}
	if opts.HTTPClient != nil {
		clientOps = append(clientOps, option.WithHTTPClient(opts.HTTPClient))
	}
	client, err := storage.NewClient(ctx, clientOps...)
	if err != nil {
		return nil, errors.Trace(err)
	}

	if !opts.SendCredentials {
		// Clear the credentials if they exist, so that they will not be sent to TiKV.
		gcs.CredentialsBlob = ""
	}

	bucket := client.Bucket(gcs.Bucket)
	// Check whether we hit the bug before #647, to handle case #2:
	// if the storage is set as gcs://bucket/prefix/,
	// the backupmeta is written correctly to gcs://bucket/prefix/backupmeta,
	// but the SSTs are written wrongly to gcs://bucket/prefix//*.sst (note the extra slash).
	// See details about case 2 at https://github.com/pingcap/br/issues/675#issuecomment-753780742
	sstInPrefix := hasSSTFiles(ctx, bucket, gcs.Prefix)
	sstInPrefixSlash := hasSSTFiles(ctx, bucket, gcs.Prefix+"//")
	if sstInPrefixSlash && !sstInPrefix {
		// This is an old bug, but we must stay compatible with it,
		// so we need to look for SSTs under the double-slash prefix.
		gcs.Prefix += "//"
	}
	return &gcsStorage{gcs: gcs, bucket: bucket}, nil
}

func hasSSTFiles(ctx context.Context, bucket *storage.BucketHandle, prefix string) bool {
	query := storage.Query{Prefix: prefix}
	_ = query.SetAttrSelection([]string{"Name"})
	it := bucket.Objects(ctx, &query)
	for {
		attrs, err := it.Next()
		if err == iterator.Done { // nolint:errorlint
			break
		}
		if err != nil {
			log.Warn("failed to list objects on gcs, will use default value for `prefix`", zap.Error(err))
			break
		}
		if strings.HasSuffix(attrs.Name, ".sst") {
			log.Info("sst file found in prefix slash", zap.String("file", attrs.Name))
			return true
		}
	}
	return false
}

// gcsObjectReader wraps storage.Reader and adds a `Seek` method.
type gcsObjectReader struct {
	storage   *gcsStorage
	name      string
	objHandle *storage.ObjectHandle
	reader    io.ReadCloser
	pos       int64
	// Reader context, used to implement `io.Seeker`.
	// Currently, lightning depends on the package `xitongsys/parquet-go` to read
	// parquet files, and that package needs an `io.Seeker`.
	// See: https://github.com/xitongsys/parquet-go/blob/207a3cee75900b2b95213627409b7bac0f190bb3/source/source.go#L9-L10
	ctx context.Context
}

// Read implements the io.Reader interface.
func (r *gcsObjectReader) Read(p []byte) (n int, err error) {
	if r.reader == nil {
		rc, err := r.objHandle.NewRangeReader(r.ctx, r.pos, -1)
		if err != nil {
			return 0, errors.Annotatef(err,
				"failed to read gcs file, file info: input.bucket='%s', input.key='%s'",
				r.storage.gcs.Bucket, r.name)
		}
		r.reader = rc
	}
	n, err = r.reader.Read(p)
	r.pos += int64(n)
	return n, err
}

// Close implements the io.Closer interface.
func (r *gcsObjectReader) Close() error {
	if r.reader == nil {
		return nil
	}
	return r.reader.Close()
}

// Seek implements the io.Seeker interface.
//
// Currently, tidb-lightning depends on this method to read parquet files from GCS storage.
func (r *gcsObjectReader) Seek(offset int64, whence int) (int64, error) {
	var realOffset int64
	switch whence {
	case io.SeekStart:
		if offset < 0 {
			return 0, errors.Annotatef(berrors.ErrInvalidArgument, "Seek: offset '%v' out of range.", offset)
		}
		realOffset = offset
	case io.SeekCurrent:
		realOffset = r.pos + offset
		// r.pos is negative after io.SeekEnd; seeking from an end-relative
		// (negative) position across zero is ambiguous, so reject it.
		if r.pos < 0 && realOffset >= 0 {
			return 0, errors.Annotatef(berrors.ErrInvalidArgument, "Seek: offset '%v' out of range. current pos is '%v'.", offset, r.pos)
		}
	case io.SeekEnd:
		if offset >= 0 {
			return 0, errors.Annotatef(berrors.ErrInvalidArgument, "Seek: offset '%v' should be negative.", offset)
		}
		// GCS supports `NewRangeReader(ctx, -10, -1)`, which means reading the last 10 bytes.
		realOffset = offset
	default:
		return 0, errors.Annotatef(berrors.ErrStorageUnknown, "Seek: invalid whence '%d'", whence)
	}
	if realOffset == r.pos {
		return realOffset, nil
	}

	_ = r.reader.Close()
	r.pos = realOffset
	rc, err := r.objHandle.NewRangeReader(r.ctx, r.pos, -1)
	if err != nil {
		return 0, errors.Annotatef(err,
			"failed to read gcs file, file info: input.bucket='%s', input.key='%s'",
			r.storage.gcs.Bucket, r.name)
	}
	r.reader = rc
	return realOffset, nil
}
...
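The `gcsObjectReader` above implements `Seek` by discarding its current reader and opening a fresh range reader at the target offset. The following is a minimal, self-contained sketch of that same technique against the public cloud.google.com/go/storage API; the bucket and object names are placeholders, not anything from the snippet above.

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("my-bucket").Object("my-object")

	// Read the whole object once. A length of -1 means "until the end".
	r, err := obj.NewRangeReader(ctx, 0, -1)
	if err != nil {
		log.Fatal(err)
	}
	all, _ := io.ReadAll(r)
	r.Close()

	// "Seek" to byte 10 by opening a new range reader at that offset,
	// exactly as gcsObjectReader.Seek does.
	r, err = obj.NewRangeReader(ctx, 10, -1)
	if err != nil {
		log.Fatal(err)
	}
	tail, _ := io.ReadAll(r)
	r.Close()

	fmt.Printf("read %d bytes total, %d bytes from offset 10\n", len(all), len(tail))
}

Opening a fresh range reader per reposition keeps no client-side buffer state, at the cost of one extra HTTP request per seek.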
gcs_storage.go
Source: gcs_storage.go
...
	conf := dest.GoogleCloudConfig
	if conf == nil {
		return nil, errors.Errorf("google cloud storage upload requested but info missing")
	}
	const scope = gcs.ScopeReadWrite
	opts := []option.ClientOption{option.WithScopes(scope)}
	// "default": only use the key in the settings; error if not present.
	// "specified": the JSON object for authentication is given by the CREDENTIALS param.
	// "implicit": only use the environment data.
	// "": if the default key is in the settings, use it; otherwise use environment data.
	if args.IOConf.DisableImplicitCredentials && conf.Auth == cloud.AuthParamImplicit {
		return nil, errors.New(
			"implicit credentials disallowed for gs due to --external-io-disable-implicit-credentials flag")
	}
	switch conf.Auth {
	case cloud.AuthParamImplicit:
		// Do nothing; use implicit params:
		// https://godoc.org/golang.org/x/oauth2/google#FindDefaultCredentials
	default:
		if conf.Credentials == "" {
			return nil, errors.Errorf(
				"%s must be set unless %q is %q",
				CredentialsParam,
				cloud.AuthParam,
				cloud.AuthParamImplicit,
			)
		}
		decodedKey, err := base64.StdEncoding.DecodeString(conf.Credentials)
		if err != nil {
			return nil, errors.Wrapf(err, "decoding value of %s", CredentialsParam)
		}
		source, err := google.JWTConfigFromJSON(decodedKey, scope)
		if err != nil {
			return nil, errors.Wrap(err, "creating GCS oauth token source from specified credentials")
		}
		opts = append(opts, option.WithTokenSource(source.TokenSource(ctx)))
	}
	g, err := gcs.NewClient(ctx, opts...)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create google cloud client")
	}
	bucket := g.Bucket(conf.Bucket)
	if conf.BillingProject != `` {
		bucket = bucket.UserProject(conf.BillingProject)
	}
	return &gcsStorage{
		bucket:   bucket,
		client:   g,
		conf:     conf,
		ioConf:   args.IOConf,
		prefix:   conf.Prefix,
		settings: args.Settings,
	}, nil
}

func (g *gcsStorage) Writer(ctx context.Context, basename string) (io.WriteCloser, error) {
	ctx, sp := tracing.ChildSpan(ctx, "gcs.Writer")
	defer sp.Finish()
	sp.RecordStructured(&types.StringValue{Value: fmt.Sprintf("gcs.Writer: %s",
		path.Join(g.prefix, basename))})
	w := g.bucket.Object(path.Join(g.prefix, basename)).NewWriter(ctx)
	if !gcsChunkingEnabled.Get(&g.settings.SV) {
		w.ChunkSize = 0
	}
	return w, nil
}

// ReadFile is shorthand for ReadFileAt with offset 0.
func (g *gcsStorage) ReadFile(ctx context.Context, basename string) (io.ReadCloser, error) {
	reader, _, err := g.ReadFileAt(ctx, basename, 0)
	return reader, err
}

func (g *gcsStorage) ReadFileAt(
	ctx context.Context, basename string, offset int64,
) (io.ReadCloser, int64, error) {
	object := path.Join(g.prefix, basename)
	ctx, sp := tracing.ChildSpan(ctx, "gcs.ReadFileAt")
	defer sp.Finish()
	sp.RecordStructured(&types.StringValue{Value: fmt.Sprintf("gcs.ReadFileAt: %s",
		path.Join(g.prefix, basename))})
	r := &cloud.ResumingReader{
		Ctx: ctx,
		Opener: func(ctx context.Context, pos int64) (io.ReadCloser, error) {
			return g.bucket.Object(object).NewRangeReader(ctx, pos, -1)
		},
		RetryOnErrFn: cloud.IsResumableHTTPError,
		Pos:          offset,
	}
	if err := r.Open(); err != nil {
		if errors.Is(err, gcs.ErrObjectNotExist) {
			// Callers of this method sometimes look at the returned error to
			// determine if the file does not exist. Regardless of why we could
			// not open the stream (whether it's an invalid bucket or the file
			// doesn't exist), return our internal ErrFileDoesNotExist.
			err = errors.WithMessagef(
				errors.Wrap(cloud.ErrFileDoesNotExist, "gcs object does not exist"),
				"%s",
				err.Error(),
			)
		}
		return nil, 0, err
	}
	return r, r.Reader.(*gcs.Reader).Attrs.Size, nil
}

func (g *gcsStorage) List(ctx context.Context, prefix, delim string, fn cloud.ListingFn) error {
	dest := cloud.JoinPathPreservingTrailingSlash(g.prefix, prefix)
	ctx, sp := tracing.ChildSpan(ctx, "gcs.List")
	defer sp.Finish()
	sp.RecordStructured(&types.StringValue{Value: fmt.Sprintf("gcs.List: %s", dest)})
	it := g.bucket.Objects(ctx, &gcs.Query{Prefix: dest, Delimiter: delim})
	for {
		attrs, err := it.Next()
		if errors.Is(err, iterator.Done) {
			return nil
		}
		if err != nil {
			return errors.Wrap(err, "unable to list files in gcs bucket")
		}
		name := attrs.Name
		if name == "" {
			name = attrs.Prefix
		}
		if err := fn(strings.TrimPrefix(name, dest)); err != nil {
			return err
		}
	}
}

func (g *gcsStorage) Delete(ctx context.Context, basename string) error {
	return contextutil.RunWithTimeout(ctx, "delete gcs file",
		cloud.Timeout.Get(&g.settings.SV),
		func(ctx context.Context) error {
			return g.bucket.Object(path.Join(g.prefix, basename)).Delete(ctx)
		})
}

func (g *gcsStorage) Size(ctx context.Context, basename string) (int64, error) {
	var r *gcs.Reader
	if err := contextutil.RunWithTimeout(ctx, "size gcs file",
		cloud.Timeout.Get(&g.settings.SV),
		func(ctx context.Context) error {
			var err error
			r, err = g.bucket.Object(path.Join(g.prefix, basename)).NewReader(ctx)
			return err
		}); err != nil {
		return 0, err
	}
	sz := r.Attrs.Size
	_ = r.Close()
	return sz, nil
}

func (g *gcsStorage) Close() error {
	return g.client.Close()
}

func init() {
	cloud.RegisterExternalStorageProvider(roachpb.ExternalStorageProvider_gs,
		parseGSURL, makeGCSStorage, cloud.RedactedParams(CredentialsParam), "gs")
...
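`ReadFileAt` above delegates retry handling to CockroachDB's internal `cloud.ResumingReader`, which reopens the object at the last successfully read offset when a read is interrupted. Below is a rough, self-contained sketch of that idea, not the real CockroachDB type: `openAt` and `retryable` are hypothetical stand-ins for the `Opener` and `RetryOnErrFn` fields.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
)

// resumingReader reopens the underlying stream at the current offset when a
// read fails with a retryable error. This mirrors the resuming-reader pattern
// used by ReadFileAt above, under the stated assumptions.
type resumingReader struct {
	ctx       context.Context
	openAt    func(ctx context.Context, pos int64) (io.ReadCloser, error)
	retryable func(error) bool
	pos       int64
	rc        io.ReadCloser
}

func (r *resumingReader) Read(p []byte) (int, error) {
	for {
		if r.rc == nil {
			rc, err := r.openAt(r.ctx, r.pos)
			if err != nil {
				return 0, err
			}
			r.rc = rc
		}
		n, err := r.rc.Read(p)
		r.pos += int64(n)
		if err != nil && err != io.EOF && r.retryable(err) {
			// Drop the broken stream; it is reopened at r.pos before the
			// next read attempt.
			r.rc.Close()
			r.rc = nil
			if n > 0 {
				// Return the bytes we already got; retry on the next call.
				return n, nil
			}
			continue
		}
		return n, err
	}
}

func (r *resumingReader) Close() error {
	if r.rc == nil {
		return nil
	}
	return r.rc.Close()
}

func main() {
	data := []byte("hello resumable world")
	r := &resumingReader{
		ctx: context.Background(),
		openAt: func(_ context.Context, pos int64) (io.ReadCloser, error) {
			// Stand-in opener over an in-memory buffer; a real implementation
			// would call bucket.Object(name).NewRangeReader(ctx, pos, -1).
			return io.NopCloser(bytes.NewReader(data[pos:])), nil
		},
		retryable: func(error) bool { return false },
	}
	out, _ := io.ReadAll(r)
	fmt.Println(string(out))
}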
store.go
Source: store.go
...
	googleStore.bucket = bucket
	var err error
	if credentialFile != "" {
		c := option.WithCredentialsFile(credentialFile)
		googleStore.readOnlyClient, err = gcsStorage.NewClient(ctx, option.WithScopes(gcsStorage.ScopeReadOnly), c)
	} else {
		googleStore.readOnlyClient, err = gcsStorage.NewClient(ctx, option.WithScopes(gcsStorage.ScopeReadOnly))
	}
	if err != nil {
		return nil, err
	}
	if credentialFile != "" {
		c := option.WithCredentialsFile(credentialFile)
		googleStore.client, err = gcsStorage.NewClient(ctx, option.WithScopes(gcsStorage.ScopeFullControl), c)
	} else {
		googleStore.client, err = gcsStorage.NewClient(ctx, option.WithScopes(gcsStorage.ScopeFullControl))
	}
	if err != nil {
		return nil, err
	}
	return googleStore, err
}

func (g *gcs) String() string {
	return "gcs://" + g.bucket
}

func (g *gcs) Has(ctx context.Context, objectName string) (bool, error) {
	client := g.readOnlyClient
	_, err := client.Bucket(g.bucket).Object(objectName).Attrs(ctx)
	if err != nil {
		if err == gcsStorage.ErrObjectNotExist {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

type gcsReader struct {
	g            *gcs
	objectName   string
	objectReader io.ReadCloser
}

func (r *gcsReader) WriteTo(writer io.Writer) (n int64, err error) {
	return storage.PipeIO(writer, r.objectReader)
}

func (r *gcsReader) Close() error {
	return r.objectReader.Close()
}

func (r *gcsReader) Read(p []byte) (n int, err error) {
	read, err := r.objectReader.Read(p)
	return read, err
}

func (r *gcsReader) ReadAt(p []byte, offset int64) (n int, err error) {
	objectReader, err := r.g.readOnlyClient.Bucket(r.g.bucket).Object(r.objectName).NewRangeReader(
		r.g.ctx, offset, int64(len(p)))
	if err != nil {
		return 0, err
	}
	return objectReader.Read(p)
}

func toSentinelErrors(err error) error {
	// return sentinel errors defined by the status package
	if strings.Contains(err.Error(), "object doesn't exist") {
		return status.ErrNotExists
	}
	return err
}

func (g *gcs) Get(ctx context.Context, objectName string) (io.ReadCloser, error) {
	objectReader, err := g.readOnlyClient.Bucket(g.bucket).Object(objectName).NewReader(ctx)
	if err != nil {
		return nil, toSentinelErrors(err)
	}
	return &gcsReader{
		g:            g,
		objectReader: objectReader,
	}, nil
}

func (g *gcs) GetAttr(ctx context.Context, objectName string) (storage.Attributes, error) {
	attr, err := g.readOnlyClient.Bucket(g.bucket).Object(objectName).Attrs(ctx)
	if err != nil {
		return storage.Attributes{}, err
	}
	return storage.Attributes{
		Created: attr.Created,
		Updated: attr.Updated,
		Owner:   attr.Owner,
	}, nil
}

func (g *gcs) GetAt(ctx context.Context, objectName string) (io.ReaderAt, error) {
	return &gcsReader{
		g:          g,
		objectName: objectName,
	}, nil
}

func (g *gcs) Touch(ctx context.Context, objectName string) error {
	_, err := g.client.Bucket(g.bucket).Object(objectName).Update(ctx, gcsStorage.ObjectAttrsToUpdate{})
	return err
}

type readCloser struct {
	reader io.Reader
}

func (rc readCloser) Read(p []byte) (n int, err error) {
	return rc.reader.Read(p)
}

func (rc readCloser) Close() error {
	return nil
}

func (g *gcs) Put(ctx context.Context, objectName string, reader io.Reader, newObject storage.NewKey) error {
	// Put if not present
	var writer *gcsStorage.Writer
	if newObject {
		writer = g.client.Bucket(g.bucket).Object(objectName).If(gcsStorage.Conditions{
			DoesNotExist: true,
		}).NewWriter(ctx)
	} else {
		writer = g.client.Bucket(g.bucket).Object(objectName).NewWriter(ctx)
	}
	_, err := storage.PipeIO(writer, readCloser{reader: reader})
	if err != nil {
		return err
	}
	return writer.Close()
}

func (g *gcs) PutCRC(ctx context.Context, objectName string, reader io.Reader, doesNotExist bool, crc uint32) error {
	// Put if not present
	var writer *gcsStorage.Writer
	if doesNotExist {
		writer = g.client.Bucket(g.bucket).Object(objectName).If(gcsStorage.Conditions{
			DoesNotExist: doesNotExist,
		}).NewWriter(ctx)
	} else {
		writer = g.client.Bucket(g.bucket).Object(objectName).NewWriter(ctx)
	}
	writer.CRC32C = crc
	_, err := storage.PipeIO(writer, readCloser{reader: reader})
	if err != nil {
		return err
	}
...
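`Put` and `PutCRC` above use a GCS write precondition (`DoesNotExist`) to get put-if-absent semantics. The sketch below shows the same pattern with the public client under minimal assumptions; when the object already exists, the precondition makes `Close` fail with an HTTP 412 Precondition Failed, which the caller can detect as shown. The bucket and object names are placeholders.

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"

	"cloud.google.com/go/storage"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Write only if the object does not already exist (put-if-absent).
	w := client.Bucket("my-bucket").Object("my-object").
		If(storage.Conditions{DoesNotExist: true}).
		NewWriter(ctx)
	if _, err := io.Copy(w, strings.NewReader("payload")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		// The precondition failure surfaces as a googleapi.Error with code 412.
		var gerr *googleapi.Error
		if errors.As(err, &gerr) && gerr.Code == http.StatusPreconditionFailed {
			fmt.Println("object already exists; left untouched")
			return
		}
		log.Fatal(err)
	}
	fmt.Println("object created")
}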
Read
Using AI Code Generation
import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	bucket := client.Bucket("my-bucket")
	// Create the bucket if it does not exist yet.
	if _, err := bucket.Attrs(ctx); err != nil {
		if err := bucket.Create(ctx, "project-id", nil); err != nil {
			log.Fatalf("Failed to create bucket: %v", err)
		}
	}
	obj := bucket.Object("my-object")
	w := obj.NewWriter(ctx)
	if _, err := w.Write([]byte("some data")); err != nil {
		log.Fatalf("Failed to write object: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("Failed to close writer: %v", err)
	}
}
Read
Using AI Code Generation
import (
	"context"
	"fmt"
	"io"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	// storage.NewClient requires a context.
	client, err := storage.NewClient(ctx)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer client.Close()
	// NewReader takes the context, not the client.
	r, err := client.Bucket("bucketname").Object("objectname").NewReader(ctx)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		fmt.Println(err)
	}
}

import (
	"context"
	"fmt"
	"io"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer client.Close()
	// NewWriter also takes the context, not the client.
	w := client.Bucket("bucketname").Object("objectname").NewWriter(ctx)
	w.ACL = []storage.ACLRule{{Entity: storage.AllUsers, Role: storage.RoleReader}}
	if _, err := io.Copy(w, os.Stdin); err != nil {
		fmt.Println(err)
	}
	if err := w.Close(); err != nil {
		fmt.Println(err)
	}
}

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer client.Close()
}
Read
Using AI Code Generation
import "fmt"

func main() {
	// The original snippet used obj without declaring it first.
	obj := &Gcs{}
	obj.Create("test.txt")
	obj.Write("hello")
	obj.Close()
	fmt.Println(obj.Read())
}

import "os"

type Gcs struct {
	// The field list was elided in the original snippet; the methods
	// below need a file handle.
	file *os.File
}

func (obj *Gcs) Create(name string) {
	obj.file, _ = os.Create(name)
}

func (obj *Gcs) Write(str string) {
	obj.file.WriteString(str)
}

func (obj *Gcs) Close() {
	obj.file.Close()
}

func (obj *Gcs) Read() string {
	content, _ := os.ReadFile(obj.file.Name())
	return string(content)
}
Read
Using AI Code Generation
import (
	"fmt"
	// the import path of the local "gcs" package was elided in the original snippet
)

func main() {
	g := gcs.NewGCS()
	g.Read()
	fmt.Println(g.Output)
}

import "strings"

type gcs struct {
	// The field list was elided in the original snippet; Read stores
	// its result here.
	Output string
}

func (g *gcs) Read() {
	g.Output = strings.ToUpper("Hello World")
}

func NewGCS() *gcs {
	return &gcs{}
}
Read
Using AI Code Generation
1import "fmt"2func main() {3 fmt.Println("Hello World")4}5import "fmt"6func main() {7 fmt.Println("Hello World")8}9import "fmt"10func main() {11 str := []string{"a", "b", "c"}12 fmt.Println(str[len(str)-1])13}