How to use the typ method of the serializer package

Best Syzkaller code snippet using serializer.typ

encoding.go

Source: encoding.go (GitHub)

copy

Full Screen

// NOTE(review): tail of a function whose header lies above this chunk;
// it emits the remaining calls and returns the serialized program bytes.
		ctx.call(c)
	}
	return ctx.buf.Bytes()
}

// serializer holds the state needed to render a program in its textual form.
type serializer struct {
	target *Target
	buf    *bytes.Buffer
	vars   map[*ResultArg]int // result args that have been assigned an r%v id
	varSeq int                // next variable id to hand out
	verbose bool              // when set, default values are printed as well
}

// printf appends formatted text to the output buffer.
func (ctx *serializer) printf(text string, args ...interface{}) {
	fmt.Fprintf(ctx.buf, text, args...)
}

// allocVarID assigns the next sequential variable id to arg, records the
// mapping for later references, and returns the id.
func (ctx *serializer) allocVarID(arg *ResultArg) int {
	id := ctx.varSeq
	ctx.varSeq++
	ctx.vars[arg] = id
	return id
}

// call serializes one syscall: an optional "rN = " result prefix, the call
// name, and the comma-separated arguments (padding args are skipped).
func (ctx *serializer) call(c *Call) {
	// Only emit a result variable if something later actually uses it.
	if c.Ret != nil && len(c.Ret.uses) != 0 {
		ctx.printf("r%v = ", ctx.allocVarID(c.Ret))
	}
	ctx.printf("%v(", c.Meta.Name)
	for i, a := range c.Args {
		if IsPad(a.Type()) {
			continue
		}
		if i != 0 {
			ctx.printf(", ")
		}
		ctx.arg(a)
	}
	ctx.printf(")\n")
}

// arg dispatches serialization to the concrete Arg type; nil args print "nil".
func (ctx *serializer) arg(arg Arg) {
	if arg == nil {
		ctx.printf("nil")
		return
	}
	arg.serialize(ctx)
}

// serialize prints a constant argument as hex.
func (a *ConstArg) serialize(ctx *serializer) {
	ctx.printf("0x%x", a.Val)
}

// serialize prints a pointer argument: special pointers as a raw value,
// otherwise "&addr" optionally followed by "=pointee".
func (a *PointerArg) serialize(ctx *serializer) {
	if a.IsSpecial() {
		ctx.printf("0x%x", a.Address)
		return
	}
	target := ctx.target
	ctx.printf("&%v", target.serializeAddr(a))
	// Elide a default pointee unless verbose mode or an ANY pointer requires it.
	if a.Res != nil && !ctx.verbose && isDefault(a.Res) && !target.isAnyPtr(a.Type()) {
		return
	}
	ctx.printf("=")
	if target.isAnyPtr(a.Type()) {
		ctx.printf("ANY=")
	}
	ctx.arg(a.Res)
}

// serialize prints a data (buffer) argument; out buffers print only a size,
// in buffers print their contents with trailing zeros stripped when safe.
func (a *DataArg) serialize(ctx *serializer) {
	typ := a.Type().(*BufferType)
	if a.Dir() == DirOut {
		ctx.printf("\"\"/%v", a.Size())
		return
	}
	data := a.Data()
	// Statically typed data will be padded with 0s during deserialization,
	// so we can strip them here for readability always. For variable-size
	// data we strip trailing 0s only if we strip enough of them.
	sz := len(data)
	for len(data) >= 2 && data[len(data)-1] == 0 && data[len(data)-2] == 0 {
		data = data[:len(data)-1]
	}
	if typ.Varlen() && len(data)+8 >= sz {
		// Not enough zeros stripped to be worth it; restore full data.
		data = data[:sz]
	}
	serializeData(ctx.buf, data, isReadableDataType(typ))
	if typ.Varlen() && sz != len(data) {
		// Record the original size so deserialization can re-pad.
		ctx.printf("/%v", sz)
	}
}

// serialize prints a struct as "{...}" or an array as "[...]"; in non-verbose
// mode a trailing run of default elements is elided for fixed-size groups.
func (a *GroupArg) serialize(ctx *serializer) {
	var delims []byte
	switch a.Type().(type) {
	case *StructType:
		delims = []byte{'{', '}'}
	case *ArrayType:
		delims = []byte{'[', ']'}
	default:
		panic("unknown group type")
	}
	ctx.buf.WriteByte(delims[0])
	lastNonDefault := len(a.Inner) - 1
	if !ctx.verbose && a.fixedInnerSize() {
		for ; lastNonDefault >= 0; lastNonDefault-- {
			if !isDefault(a.Inner[lastNonDefault]) {
				break
			}
		}
	}
	for i := 0; i <= lastNonDefault; i++ {
		arg1 := a.Inner[i]
		if arg1 != nil && IsPad(arg1.Type()) {
			continue
		}
		if i != 0 {
			ctx.printf(", ")
		}
		ctx.arg(arg1)
	}
	ctx.buf.WriteByte(delims[1])
}

// serialize prints a union option as "@name" plus "=value" when the value
// is non-default (or verbose mode is on).
func (a *UnionArg) serialize(ctx *serializer) {
	typ := a.Type().(*UnionType)
	ctx.printf("@%v", typ.Fields[a.Index].Name)
	if !ctx.verbose && isDefault(a.Option) {
		return
	}
	ctx.printf("=")
	ctx.arg(a.Option)
}

// serialize prints a result argument: "<rN=>" declaration if used later,
// then either a literal value or a reference "rN" with optional /div +add ops.
func (a *ResultArg) serialize(ctx *serializer) {
	if len(a.uses) != 0 {
		ctx.printf("<r%v=>", ctx.allocVarID(a))
	}
	if a.Res == nil {
		ctx.printf("0x%x", a.Val)
		return
	}
	// The referenced result must have been serialized (and numbered) already.
	id, ok := ctx.vars[a.Res]
	if !ok {
		panic("no result")
	}
	ctx.printf("r%v", id)
	if a.OpDiv != 0 {
		ctx.printf("/%v", a.OpDiv)
	}
	if a.OpAdd != 0 {
		ctx.printf("+%v", a.OpAdd)
	}
}

// DeserializeMode selects how strictly Deserialize treats malformed input.
type DeserializeMode int

const (
	Strict    DeserializeMode = iota
	NonStrict DeserializeMode = iota
)

// Deserialize parses the textual program representation in data back into a
// Prog. In Strict mode recoverable inconsistencies become errors; in
// NonStrict mode they are fixed up where possible.
func (target *Target) Deserialize(data []byte, mode DeserializeMode) (*Prog, error) {
	// Wrap any parser panic with enough context to reproduce the failure.
	defer func() {
		if err := recover(); err != nil {
			panic(fmt.Errorf("%v\ntarget: %v/%v, rev: %v, mode=%v, prog:\n%q",
				err, target.OS, target.Arch, GitRevision, mode, data))
		}
	}()
	p := newParser(target, data, mode == Strict)
	prog, err := p.parseProg()
	if err := p.Err(); err != nil {
		return nil, err
	}
	if err != nil {
		return nil, err
	}
	// This validation is done even in non-debug mode because deserialization
	// procedure does not catch all bugs (e.g. mismatched types).
	// And we can receive bad programs from corpus and hub.
	if err := prog.validate(); err != nil {
		return nil, err
	}
	if p.autos != nil {
		p.fixupAutos(prog)
	}
	if err := prog.sanitize(mode == NonStrict); err != nil {
		return nil, err
	}
	return prog, nil
}

// parseProg parses the whole program: one call per line, with '#' comments
// attached either to the program or to the preceding call.
func (p *parser) parseProg() (*Prog, error) {
	prog := &Prog{
		Target: p.target,
	}
	for p.Scan() {
		if p.EOF() {
			if p.comment != "" {
				prog.Comments = append(prog.Comments, p.comment)
				p.comment = ""
			}
			continue
		}
		if p.Char() == '#' {
			if p.comment != "" {
				prog.Comments = append(prog.Comments, p.comment)
			}
			p.comment = strings.TrimSpace(p.s[p.i+1:])
			continue
		}
		name := p.Ident()
		r := ""
		// "rN = call(...)" form: remember the result variable name.
		if p.Char() == '=' {
			r = name
			p.Parse('=')
			name = p.Ident()
		}
		meta := p.target.SyscallMap[name]
		if meta == nil {
			return nil, fmt.Errorf("unknown syscall %v", name)
		}
		c := &Call{
			Meta:    meta,
			Ret:     MakeReturnArg(meta.Ret),
			Comment: p.comment,
		}
		prog.Calls = append(prog.Calls, c)
		p.Parse('(')
		for i := 0; p.Char() != ')'; i++ {
			if i >= len(meta.Args) {
				// Description changed: swallow the extra args and continue.
				p.eatExcessive(false, "excessive syscall arguments")
				break
			}
			field := meta.Args[i]
			if IsPad(field.Type) {
				return nil, fmt.Errorf("padding in syscall %v arguments", name)
			}
			arg, err := p.parseArg(field.Type, DirIn)
			if err != nil {
				return nil, err
			}
			c.Args = append(c.Args, arg)
			if p.Char() != ')' {
				p.Parse(',')
			}
		}
		p.Parse(')')
		p.SkipWs()
		if !p.EOF() {
			if p.Char() != '#' {
				return nil, fmt.Errorf("tailing data (line #%v)", p.l)
			}
			// A trailing comment replaces the call comment; the old one
			// (if any) is promoted to a program-level comment.
			if c.Comment != "" {
				prog.Comments = append(prog.Comments, c.Comment)
			}
			c.Comment = strings.TrimSpace(p.s[p.i+1:])
		}
		// Fill in defaults for args missing at the end (description changed).
		for i := len(c.Args); i < len(meta.Args); i++ {
			p.strictFailf("missing syscall args")
			c.Args = append(c.Args, meta.Args[i].DefaultArg(DirIn))
		}
		if len(c.Args) != len(meta.Args) {
			return nil, fmt.Errorf("wrong call arg count: %v, want %v", len(c.Args), len(meta.Args))
		}
		if r != "" && c.Ret != nil {
			p.vars[r] = c.Ret
		}
		p.comment = ""
	}
	if p.comment != "" {
		prog.Comments = append(prog.Comments, p.comment)
	}
	return prog, nil
}

// parseArg parses one argument, handling the "<rN=>" result declaration
// prefix and substituting a default value for a parsed nil.
func (p *parser) parseArg(typ Type, dir Dir) (Arg, error) {
	r := ""
	if p.Char() == '<' {
		p.Parse('<')
		r = p.Ident()
		p.Parse('=')
		p.Parse('>')
	}
	arg, err := p.parseArgImpl(typ, dir)
	if err != nil {
		return nil, err
	}
	if arg == nil {
		if typ != nil {
			arg = typ.DefaultArg(dir)
		} else if r != "" {
			return nil, fmt.Errorf("named nil argument")
		}
	}
	if r != "" {
		if res, ok := arg.(*ResultArg); ok {
			p.vars[r] = res
		}
	}
	return arg, nil
}

// parseArgImpl dispatches on the first character to the right parser:
// 0x.. int, rN result, &addr, quoted string, {struct}, [array], @union,
// nil, or AUTO.
func (p *parser) parseArgImpl(typ Type, dir Dir) (Arg, error) {
	if typ == nil && p.Char() != 'n' {
		p.eatExcessive(true, "non-nil argument for nil type")
		return nil, nil
	}
	switch p.Char() {
	case '0':
		return p.parseArgInt(typ, dir)
	case 'r':
		return p.parseArgRes(typ, dir)
	case '&':
		return p.parseArgAddr(typ, dir)
	case '"', '\'':
		return p.parseArgString(typ, dir)
	case '{':
		return p.parseArgStruct(typ, dir)
	case '[':
		return p.parseArgArray(typ, dir)
	case '@':
		return p.parseArgUnion(typ, dir)
	case 'n':
		p.Parse('n')
		p.Parse('i')
		p.Parse('l')
		return nil, nil
	case 'A':
		p.Parse('A')
		p.Parse('U')
		p.Parse('T')
		p.Parse('O')
		return p.parseAuto(typ, dir)
	default:
		return nil, fmt.Errorf("failed to parse argument at '%c' (line #%v/%v: %v)",
			p.Char(), p.l, p.i, p.s)
	}
}

// parseArgInt parses a numeric literal into the arg kind appropriate for typ;
// mismatched types are recovered by eating the value and using the default.
func (p *parser) parseArgInt(typ Type, dir Dir) (Arg, error) {
	val := p.Ident()
	v, err := strconv.ParseUint(val, 0, 64)
	if err != nil {
		return nil, fmt.Errorf("wrong arg value '%v': %v", val, err)
	}
	switch typ.(type) {
	case *ConstType, *IntType, *FlagsType, *ProcType, *CsumType:
		arg := Arg(MakeConstArg(typ, dir, v))
		if dir == DirOut && !typ.isDefaultArg(arg) {
			p.strictFailf("out arg %v has non-default value: %v", typ, v)
			arg = typ.DefaultArg(dir)
		}
		return arg, nil
	case *LenType:
		return MakeConstArg(typ, dir, v), nil
	case *ResourceType:
		return MakeResultArg(typ, dir, nil, v), nil
	case *PtrType, *VmaType:
		// Special pointers are serialized as small negative values.
		index := -v % uint64(len(p.target.SpecialPointers))
		return MakeSpecialPointerArg(typ, dir, index), nil
	default:
		p.eatExcessive(true, "wrong int arg %T", typ)
		return typ.DefaultArg(dir), nil
	}
}

// parseAuto handles the AUTO keyword for types whose value is computed
// during fixup (const/len/csum).
func (p *parser) parseAuto(typ Type, dir Dir) (Arg, error) {
	switch typ.(type) {
	case *ConstType, *LenType, *CsumType:
		return p.auto(MakeConstArg(typ, dir, 0)), nil
	default:
		return nil, fmt.Errorf("wrong type %T for AUTO", typ)
	}
}

// parseArgRes parses a reference "rN" with optional "/div" and "+add"
// operations; undeclared variables fall back to the default value.
func (p *parser) parseArgRes(typ Type, dir Dir) (Arg, error) {
	id := p.Ident()
	var div, add uint64
	if p.Char() == '/' {
		p.Parse('/')
		op := p.Ident()
		v, err := strconv.ParseUint(op, 0, 64)
		if err != nil {
			return nil, fmt.Errorf("wrong result div op: '%v'", op)
		}
		div = v
	}
	if p.Char() == '+' {
		p.Parse('+')
		op := p.Ident()
		v, err := strconv.ParseUint(op, 0, 64)
		if err != nil {
			return nil, fmt.Errorf("wrong result add op: '%v'", op)
		}
		add = v
	}
	v := p.vars[id]
	if v == nil {
		p.strictFailf("undeclared variable %v", id)
		return typ.DefaultArg(dir), nil
	}
	arg := MakeResultArg(typ, dir, v, 0)
	arg.OpDiv = div
	arg.OpAdd = add
	return arg, nil
}

// parseArgAddr parses "&addr" (pointer or vma), an optional AUTO address,
// an optional "=pointee" (possibly ANY=), and builds the pointer arg.
func (p *parser) parseArgAddr(typ Type, dir Dir) (Arg, error) {
	var elem Type
	elemDir := DirInOut
	switch t1 := typ.(type) {
	case *PtrType:
		elem, elemDir = t1.Elem, t1.ElemDir
	case *VmaType:
	default:
		p.eatExcessive(true, "wrong addr arg")
		return typ.DefaultArg(dir), nil
	}
	p.Parse('&')
	auto := false
	var addr, vmaSize uint64
	if p.Char() == 'A' {
		p.Parse('A')
		p.Parse('U')
		p.Parse('T')
		p.Parse('O')
		if elem == nil {
			return nil, fmt.Errorf("vma type can't be AUTO")
		}
		auto = true
	} else {
		var err error
		addr, vmaSize, err = p.parseAddr()
		if err != nil {
			return nil, err
		}
	}
	var inner Arg
	if p.Char() == '=' {
		p.Parse('=')
		if p.Char() == 'A' {
			p.Parse('A')
			p.Parse('N')
			p.Parse('Y')
			p.Parse('=')
			// ANY pointee: retype to the target's any-pointer type.
			anyPtr := p.target.getAnyPtrType(typ.Size())
			typ, elem, elemDir = anyPtr, anyPtr.Elem, anyPtr.ElemDir
		}
		var err error
		inner, err = p.parseArg(elem, elemDir)
		if err != nil {
			return nil, err
		}
	}
	if elem == nil {
		// vma addresses must be page-aligned.
		if addr%p.target.PageSize != 0 {
			p.strictFailf("unaligned vma address 0x%x", addr)
			addr &= ^(p.target.PageSize - 1)
		}
		return MakeVmaPointerArg(typ, dir, addr, vmaSize), nil
	}
	if inner == nil {
		inner = elem.DefaultArg(elemDir)
	}
	arg := MakePointerArg(typ, dir, addr, inner)
	if auto {
		p.auto(arg)
	}
	return arg, nil
}

// parseArgString parses a quoted buffer value with optional "/size" suffix,
// pads/truncates to the type's size, and checks enumerated string values.
func (p *parser) parseArgString(t Type, dir Dir) (Arg, error) {
	typ, ok := t.(*BufferType)
	if !ok {
		p.eatExcessive(true, "wrong string arg")
		return t.DefaultArg(dir), nil
	}
	data, err := p.deserializeData()
	if err != nil {
		return nil, err
	}
	size := ^uint64(0)
	if p.Char() == '/' {
		p.Parse('/')
		sizeStr := p.Ident()
		size, err = strconv.ParseUint(sizeStr, 0, 64)
		if err != nil {
			return nil, fmt.Errorf("failed to parse buffer size: %q", sizeStr)
		}
		// Cap at total addressable memory.
		maxMem := p.target.NumPages * p.target.PageSize
		if size > maxMem {
			p.strictFailf("too large string argument %v", size)
			size = maxMem
		}
	}
	if !typ.Varlen() {
		size = typ.Size()
	} else if size == ^uint64(0) {
		size = uint64(len(data))
	}
	if dir == DirOut {
		return MakeOutDataArg(typ, dir, size), nil
	}
	// Re-pad with zeros that serialization stripped.
	if diff := int(size) - len(data); diff > 0 {
		data = append(data, make([]byte, diff)...)
	}
	data = data[:size]
	if typ.Kind == BufferString && len(typ.Values) != 0 &&
		// AUTOGENERATED will be padded by 0's.
		!strings.HasPrefix(typ.Values[0], "AUTOGENERATED") {
		matched := false
		for _, val := range typ.Values {
			if string(data) == val {
				matched = true
				break
			}
		}
		if !matched {
			p.strictFailf("bad string value %q, expect %q", data, typ.Values)
			data = []byte(typ.Values[0])
		}
	}
	return MakeDataArg(typ, dir, data), nil
}

// parseArgStruct parses "{field, field, ...}", synthesizing pad fields and
// defaults for fields missing at the end.
func (p *parser) parseArgStruct(typ Type, dir Dir) (Arg, error) {
	p.Parse('{')
	t1, ok := typ.(*StructType)
	if !ok {
		p.eatExcessive(false, "wrong struct arg")
		p.Parse('}')
		return typ.DefaultArg(dir), nil
	}
	var inner []Arg
	for i := 0; p.Char() != '}'; i++ {
		if i >= len(t1.Fields) {
			p.eatExcessive(false, "excessive struct %v fields", typ.Name())
			break
		}
		field := t1.Fields[i]
		if IsPad(field.Type) {
			// Pads are never serialized; recreate them here.
			inner = append(inner, MakeConstArg(field.Type, dir, 0))
		} else {
			arg, err := p.parseArg(field.Type, dir)
			if err != nil {
				return nil, err
			}
			inner = append(inner, arg)
			if p.Char() != '}' {
				p.Parse(',')
			}
		}
	}
	p.Parse('}')
	for len(inner) < len(t1.Fields) {
		field := t1.Fields[len(inner)]
		if !IsPad(field.Type) {
			p.strictFailf("missing struct %v fields %v/%v", typ.Name(), len(inner), len(t1.Fields))
		}
		inner = append(inner, field.Type.DefaultArg(dir))
	}
	return MakeGroupArg(typ, dir, inner), nil
}

// parseArgArray parses "[elem, elem, ...]" and enforces the exact element
// count for fixed-size range arrays.
func (p *parser) parseArgArray(typ Type, dir Dir) (Arg, error) {
	p.Parse('[')
	t1, ok := typ.(*ArrayType)
	if !ok {
		p.eatExcessive(false, "wrong array arg %T", typ)
		p.Parse(']')
		return typ.DefaultArg(dir), nil
	}
	var inner []Arg
	for i := 0; p.Char() != ']'; i++ {
		arg, err := p.parseArg(t1.Elem, dir)
		if err != nil {
			return nil, err
		}
		inner = append(inner, arg)
		if p.Char() != ']' {
			p.Parse(',')
		}
	}
	p.Parse(']')
	if t1.Kind == ArrayRangeLen && t1.RangeBegin == t1.RangeEnd {
		for uint64(len(inner)) < t1.RangeBegin {
			p.strictFailf("missing array elements")
			inner = append(inner, t1.Elem.DefaultArg(dir))
		}
		inner = inner[:t1.RangeBegin]
	}
	return MakeGroupArg(typ, dir, inner), nil
}

// parseArgUnion parses "@option" with an optional "=value"; unknown options
// are recovered by eating the value and using the default.
func (p *parser) parseArgUnion(typ Type, dir Dir) (Arg, error) {
	t1, ok := typ.(*UnionType)
	if !ok {
		p.eatExcessive(true, "wrong union arg")
		return typ.DefaultArg(dir), nil
	}
	p.Parse('@')
	name := p.Ident()
	var optType Type
	index := -1
	for i, field := range t1.Fields {
		if name == field.Name {
			optType, index = field.Type, i
			break
		}
	}
	if optType == nil {
		p.eatExcessive(true, "wrong union option")
		return typ.DefaultArg(dir), nil
	}
	var opt Arg
	if p.Char() == '=' {
		p.Parse('=')
		var err error
		opt, err = p.parseArg(optType, dir)
		if err != nil {
			return nil, err
		}
	} else {
		opt = optType.DefaultArg(dir)
	}
	return MakeUnionArg(typ, dir, opt, index), nil
}

// Eats excessive call arguments and struct fields to recover after description changes.
func (p *parser) eatExcessive(stopAtComma bool, what string, args ...interface{}) {
	p.strictFailf(what, args...)
	// Track bracket nesting so we stop at the delimiter that closes the
	// scope we started in, skipping over quoted strings.
	paren, brack, brace := 0, 0, 0
	for !p.EOF() && p.e == nil {
		ch := p.Char()
		switch ch {
		case '(':
			paren++
		case ')':
			if paren == 0 {
				return
			}
			paren--
		case '[':
			brack++
		case ']':
			if brack == 0 {
				return
			}
			brack--
		case '{':
			brace++
		case '}':
			if brace == 0 {
				return
			}
			brace--
		case ',':
			if stopAtComma && paren == 0 && brack == 0 && brace == 0 {
				return
			}
		case '\'', '"':
			// Skip quoted contents wholesale.
			p.Parse(ch)
			for !p.EOF() && p.Char() != ch {
				p.Parse(p.Char())
			}
			if p.EOF() {
				return
			}
		}
		p.Parse(ch)
	}
}

const (
	// encodingAddrBase is added to addresses on serialization and subtracted
	// on parsing, so the textual form shows absolute-looking addresses.
	encodingAddrBase = 0x7f0000000000
	maxLineLen       = 1 << 20
)

// serializeAddr formats a pointer address (with optional vma size suffix).
func (target *Target) serializeAddr(arg *PointerArg) string {
	ssize := ""
	if arg.VmaSize != 0 {
		ssize = fmt.Sprintf("/0x%x", arg.VmaSize)
	}
	return fmt.Sprintf("(0x%x%v)", encodingAddrBase+arg.Address, ssize)
}

// parseAddr parses "(0xADDR[+/-off][/size])" and returns the base-relative
// address and the page-rounded vma size (0 for plain pointers).
func (p *parser) parseAddr() (uint64, uint64, error) {
	p.Parse('(')
	pstr := p.Ident()
	addr, err := strconv.ParseUint(pstr, 0, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to parse addr: %q", pstr)
	}
	if addr < encodingAddrBase {
		return 0, 0, fmt.Errorf("address without base offset: %q", pstr)
	}
	addr -= encodingAddrBase
	// This is not used anymore, but left here to parse old programs.
	if p.Char() == '+' || p.Char() == '-' {
		minus := false
		if p.Char() == '-' {
			minus = true
			p.Parse('-')
		} else {
			p.Parse('+')
		}
		ostr := p.Ident()
		off, err := strconv.ParseUint(ostr, 0, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse addr offset: %q", ostr)
		}
		if minus {
			off = -off
		}
		addr += off
	}
	target := p.target
	maxMem := target.NumPages * target.PageSize
	var vmaSize uint64
	if p.Char() == '/' {
		p.Parse('/')
		pstr := p.Ident()
		size, err := strconv.ParseUint(pstr, 0, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse addr size: %q", pstr)
		}
		// Page-align the address and round the size up to whole pages,
		// clamping both into addressable memory.
		addr = addr & ^(target.PageSize - 1)
		vmaSize = (size + target.PageSize - 1) & ^(target.PageSize - 1)
		if vmaSize == 0 {
			vmaSize = target.PageSize
		}
		if vmaSize > maxMem {
			vmaSize = maxMem
		}
		if addr > maxMem-vmaSize {
			addr = maxMem - vmaSize
		}
	}
	p.Parse(')')
	return addr, vmaSize, nil
}

// serializeData writes data as a hex-encoded quoted string when it is not
// human-readable.
// NOTE(review): the function body continues past the end of this chunk;
// only the hex-encoding path is visible here.
func serializeData(buf *bytes.Buffer, data []byte, readable bool) {
	if !readable && !isReadableData(data) {
		fmt.Fprintf(buf, "\"%v\"", hex.EncodeToString(data))
		return
	}
buf.WriteByte('\'')758 encodeData(buf, data, true, false)759 buf.WriteByte('\'')760}761func EncodeData(buf *bytes.Buffer, data []byte, readable bool) {762 if !readable && isReadableData(data) {763 readable = true764 }765 encodeData(buf, data, readable, true)766}767func encodeData(buf *bytes.Buffer, data []byte, readable, cstr bool) {768 for _, v := range data {769 if !readable {770 lo, hi := byteToHex(v)771 buf.Write([]byte{'\\', 'x', hi, lo})772 continue773 }774 switch v {775 case '\a':776 buf.Write([]byte{'\\', 'a'})777 case '\b':778 buf.Write([]byte{'\\', 'b'})779 case '\f':780 buf.Write([]byte{'\\', 'f'})781 case '\n':782 buf.Write([]byte{'\\', 'n'})783 case '\r':784 buf.Write([]byte{'\\', 'r'})785 case '\t':786 buf.Write([]byte{'\\', 't'})787 case '\v':788 buf.Write([]byte{'\\', 'v'})789 case '\'':790 buf.Write([]byte{'\\', '\''})791 case '"':792 buf.Write([]byte{'\\', '"'})793 case '\\':794 buf.Write([]byte{'\\', '\\'})795 default:796 if isPrintable(v) {797 buf.WriteByte(v)798 } else {799 if cstr {800 // We would like to use hex encoding with \x,801 // but C's \x is hard to use: it can contain _any_ number of hex digits802 // (not just 2 or 4), so later non-hex encoded chars will glue to \x.803 c0 := (v>>6)&0x7 + '0'804 c1 := (v>>3)&0x7 + '0'805 c2 := (v>>0)&0x7 + '0'806 buf.Write([]byte{'\\', c0, c1, c2})807 } else {808 lo, hi := byteToHex(v)809 buf.Write([]byte{'\\', 'x', hi, lo})810 }811 }812 }813 }814}815func isReadableDataType(typ *BufferType) bool {816 return typ.Kind == BufferString || typ.Kind == BufferFilename817}818func isReadableData(data []byte) bool {819 if len(data) == 0 {820 return false821 }822 for _, v := range data {823 if isPrintable(v) {824 continue825 }826 switch v {827 case 0, '\a', '\b', '\f', '\n', '\r', '\t', '\v':828 continue829 }830 return false831 }832 return true833}834func (p *parser) deserializeData() ([]byte, error) {835 var data []byte836 if p.Char() == '"' {837 p.Parse('"')838 val := ""839 if p.Char() != '"' {840 val = 
p.Ident()841 }842 p.Parse('"')843 var err error844 data, err = hex.DecodeString(val)845 if err != nil {846 return nil, fmt.Errorf("data arg has bad value %q", val)847 }848 } else {849 if p.consume() != '\'' {850 return nil, fmt.Errorf("data arg does not start with \" nor with '")851 }852 for p.Char() != '\'' && p.Char() != 0 {853 v := p.consume()854 if v != '\\' {855 data = append(data, v)856 continue857 }858 v = p.consume()859 switch v {860 case 'x':861 hi := p.consume()862 lo := p.consume()863 b, ok := hexToByte(lo, hi)864 if !ok {865 return nil, fmt.Errorf("invalid hex \\x%v%v in data arg", hi, lo)866 }867 data = append(data, b)868 case 'a':869 data = append(data, '\a')870 case 'b':871 data = append(data, '\b')872 case 'f':873 data = append(data, '\f')874 case 'n':875 data = append(data, '\n')876 case 'r':877 data = append(data, '\r')878 case 't':879 data = append(data, '\t')880 case 'v':881 data = append(data, '\v')882 case '\'':883 data = append(data, '\'')884 case '"':885 data = append(data, '"')886 case '\\':887 data = append(data, '\\')888 default:889 return nil, fmt.Errorf("invalid \\%c escape sequence in data arg", v)890 }891 }892 p.Parse('\'')893 }894 return data, nil895}896func isPrintable(v byte) bool {897 return v >= 0x20 && v < 0x7f898}899func byteToHex(v byte) (lo, hi byte) {900 return toHexChar(v & 0xf), toHexChar(v >> 4)901}902func hexToByte(lo, hi byte) (byte, bool) {903 h, ok1 := fromHexChar(hi)904 l, ok2 := fromHexChar(lo)905 return h<<4 + l, ok1 && ok2906}907func toHexChar(v byte) byte {908 if v >= 16 {909 panic("bad hex char")910 }911 if v < 10 {912 return '0' + v913 }914 return 'a' + v - 10915}916func fromHexChar(v byte) (byte, bool) {917 if v >= '0' && v <= '9' {918 return v - '0', true919 }920 if v >= 'a' && v <= 'f' {921 return v - 'a' + 10, true922 }923 return 0, false924}925type parser struct {926 target *Target927 strict bool928 vars map[string]*ResultArg929 autos map[Arg]bool930 comment string931 r *bufio.Scanner932 s string933 i 
int934 l int935 e error936}937func newParser(target *Target, data []byte, strict bool) *parser {938 p := &parser{939 target: target,940 strict: strict,941 vars: make(map[string]*ResultArg),942 r: bufio.NewScanner(bytes.NewReader(data)),943 }944 p.r.Buffer(nil, maxLineLen)945 return p946}947func (p *parser) auto(arg Arg) Arg {948 if p.autos == nil {949 p.autos = make(map[Arg]bool)950 }951 p.autos[arg] = true952 return arg953}954func (p *parser) fixupAutos(prog *Prog) {955 s := analyze(nil, nil, prog, nil)956 for _, c := range prog.Calls {957 p.target.assignSizesArray(c.Args, c.Meta.Args, p.autos)958 ForeachArg(c, func(arg Arg, _ *ArgCtx) {959 if !p.autos[arg] {960 return961 }962 delete(p.autos, arg)963 switch typ := arg.Type().(type) {964 case *ConstType:965 arg.(*ConstArg).Val = typ.Val966 _ = s967 case *PtrType:968 a := arg.(*PointerArg)969 a.Address = s.ma.alloc(nil, a.Res.Size())970 default:971 panic(fmt.Sprintf("unsupported auto type %T", typ))972 }973 })974 }975 if len(p.autos) != 0 {976 panic(fmt.Sprintf("leftoever autos: %+v", p.autos))977 }978}979func (p *parser) Scan() bool {980 if p.e != nil {981 return false982 }983 if !p.r.Scan() {984 p.e = p.r.Err()985 return false...

Full Screen

Full Screen

file.go

Source:file.go Github

copy

Full Screen

...15 "os"16 "github.com/apache/arrow/go/arrow/array"17 "github.com/cockroachdb/cockroach/pkg/col/coldata"18 "github.com/cockroachdb/cockroach/pkg/col/colserde/arrowserde"19 "github.com/cockroachdb/cockroach/pkg/col/typeconv"20 "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"21 "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"22 "github.com/cockroachdb/cockroach/pkg/sql/types"23 "github.com/cockroachdb/errors"24 mmap "github.com/edsrzf/mmap-go"25 flatbuffers "github.com/google/flatbuffers/go"26)27const fileMagic = `ARROW1`28var fileMagicPadding [8 - len(fileMagic)]byte29type fileBlock struct {30 offset int6431 metadataLen int3232 bodyLen int6433}34// FileSerializer converts our in-mem columnar batch representation into the35// arrow specification's file format. All batches serialized to a file must have36// the same schema.37type FileSerializer struct {38 scratch [4]byte39 w *countingWriter40 typs []*types.T41 fb *flatbuffers.Builder42 a *ArrowBatchConverter43 rb *RecordBatchSerializer44 recordBatches []fileBlock45}46// NewFileSerializer creates a FileSerializer for the given types. The caller is47// responsible for closing the given writer.48func NewFileSerializer(w io.Writer, typs []*types.T) (*FileSerializer, error) {49 a, err := NewArrowBatchConverter(typs)50 if err != nil {51 return nil, err52 }53 rb, err := NewRecordBatchSerializer(typs)54 if err != nil {55 return nil, err56 }57 s := &FileSerializer{58 typs: typs,59 fb: flatbuffers.NewBuilder(flatbufferBuilderInitialCapacity),60 a: a,61 rb: rb,62 }63 return s, s.Reset(w)64}65// Reset can be called to reuse this FileSerializer with a new io.Writer after66// calling Finish. The types will remain the ones passed to the constructor. 
The67// caller is responsible for closing the given writer.68func (s *FileSerializer) Reset(w io.Writer) error {69 if s.w != nil {70 return errors.New(`Finish must be called before Reset`)71 }72 s.w = &countingWriter{wrapped: w}73 s.recordBatches = s.recordBatches[:0]74 if _, err := io.WriteString(s.w, fileMagic); err != nil {75 return err76 }77 // Pad to 8 byte boundary.78 if _, err := s.w.Write(fileMagicPadding[:]); err != nil {79 return err80 }81 // The file format is a wrapper around the streaming format and the streaming82 // format starts with a Schema message.83 s.fb.Reset()84 messageOffset := schemaMessage(s.fb, s.typs)85 s.fb.Finish(messageOffset)86 schemaBytes := s.fb.FinishedBytes()87 if _, err := s.w.Write(schemaBytes); err != nil {88 return err89 }90 _, err := s.w.Write(make([]byte, calculatePadding(len(schemaBytes))))91 return err92}93// AppendBatch adds one batch of columnar data to the file.94func (s *FileSerializer) AppendBatch(batch coldata.Batch) error {95 offset := int64(s.w.written)96 arrow, err := s.a.BatchToArrow(batch)97 if err != nil {98 return err99 }100 metadataLen, bodyLen, err := s.rb.Serialize(s.w, arrow, batch.Length())101 if err != nil {102 return err103 }104 s.recordBatches = append(s.recordBatches, fileBlock{105 offset: offset,106 metadataLen: int32(metadataLen),107 bodyLen: int64(bodyLen),108 })109 return nil110}111// Finish writes the footer metadata described by the arrow spec. 
Nothing can be112// called after Finish except Reset.113func (s *FileSerializer) Finish() error {114 defer func() {115 s.w = nil116 }()117 // Write the footer flatbuffer, which has byte offsets of all the record118 // batch messages in the file.119 s.fb.Reset()120 footerOffset := fileFooter(s.fb, s.typs, s.recordBatches)121 s.fb.Finish(footerOffset)122 footerBytes := s.fb.FinishedBytes()123 if _, err := s.w.Write(footerBytes); err != nil {124 return err125 }126 // For the footer, and only the footer, the spec requires the length _after_127 // the footer so that it can be read by starting at the back of the file and128 // working forward.129 binary.LittleEndian.PutUint32(s.scratch[:], uint32(len(footerBytes)))130 if _, err := s.w.Write(s.scratch[:]); err != nil {131 return err132 }133 // Spec wants the magic again here.134 _, err := io.WriteString(s.w, fileMagic)135 return err136}137// FileDeserializer decodes columnar data batches from files encoded according138// to the arrow spec.139type FileDeserializer struct {140 buf []byte141 bufCloseFn func() error142 recordBatches []fileBlock143 idx int144 end int145 typs []*types.T146 a *ArrowBatchConverter147 rb *RecordBatchSerializer148 arrowScratch []*array.Data149}150// NewFileDeserializerFromBytes constructs a FileDeserializer for an in-memory151// buffer.152func NewFileDeserializerFromBytes(typs []*types.T, buf []byte) (*FileDeserializer, error) {153 return newFileDeserializer(typs, buf, func() error { return nil })154}155// NewFileDeserializerFromPath constructs a FileDeserializer by reading it from156// a file.157func NewFileDeserializerFromPath(typs []*types.T, path string) (*FileDeserializer, error) {158 f, err := os.Open(path)159 if err != nil {160 return nil, pgerror.Wrapf(err, pgcode.Io, `opening %s`, path)161 }162 // TODO(dan): This is currently using copy on write semantics because we store163 // the nulls differently in-mem than arrow does and there's an in-place164 // conversion. 
If we used the same format that arrow does, this could be165 // switched to mmap.RDONLY (it's easy to check, the test fails with a SIGBUS166 // right now with mmap.RDONLY).167 buf, err := mmap.Map(f, mmap.COPY, 0 /* flags */)168 if err != nil {169 return nil, pgerror.Wrapf(err, pgcode.Io, `mmaping %s`, path)170 }171 return newFileDeserializer(typs, buf, buf.Unmap)172}173func newFileDeserializer(174 typs []*types.T, buf []byte, bufCloseFn func() error,175) (*FileDeserializer, error) {176 d := &FileDeserializer{177 buf: buf,178 bufCloseFn: bufCloseFn,179 end: len(buf),180 }181 var err error182 if err = d.init(); err != nil {183 return nil, err184 }185 d.typs = typs186 if d.a, err = NewArrowBatchConverter(typs); err != nil {187 return nil, err188 }189 if d.rb, err = NewRecordBatchSerializer(typs); err != nil {190 return nil, err191 }192 d.arrowScratch = make([]*array.Data, 0, len(typs))193 return d, nil194}195// Close releases any resources held by this deserializer.196func (d *FileDeserializer) Close() error {197 return d.bufCloseFn()198}199// Typs returns the in-memory types for the data stored in this file.200func (d *FileDeserializer) Typs() []*types.T {201 return d.typs202}203// NumBatches returns the number of record batches stored in this file.204func (d *FileDeserializer) NumBatches() int {205 return len(d.recordBatches)206}207// GetBatch fills in the given in-mem batch with the requested on-disk data.208func (d *FileDeserializer) GetBatch(batchIdx int, b coldata.Batch) error {209 rb := d.recordBatches[batchIdx]210 d.idx = int(rb.offset)211 buf, err := d.read(metadataLengthNumBytes + int(rb.metadataLen) + int(rb.bodyLen))212 if err != nil {213 return err214 }215 d.arrowScratch = d.arrowScratch[:0]216 batchLength, err := d.rb.Deserialize(&d.arrowScratch, buf)217 if err != nil {218 return err219 }220 return d.a.ArrowToBatch(d.arrowScratch, batchLength, b)221}222// read gets the next `n` bytes from the start of the buffer, consuming them.223func (d 
*FileDeserializer) read(n int) ([]byte, error) {224 if d.idx+n > d.end {225 return nil, io.EOF226 }227 start := d.idx228 d.idx += n229 return d.buf[start:d.idx], nil230}231// readBackward gets the `n` bytes from the end of the buffer, consuming them.232func (d *FileDeserializer) readBackward(n int) ([]byte, error) {233 if d.idx+n > d.end {234 return nil, io.EOF235 }236 end := d.end237 d.end -= n238 return d.buf[d.end:end], nil239}240// init verifies the file magic and headers. After init, the `idx` and `end`241// fields are set to the range of record batches and dictionary batches242// described by the arrow spec's streaming format.243func (d *FileDeserializer) init() error {244 // Check the header magic245 if magic, err := d.read(8); err != nil {246 return pgerror.Wrap(err, pgcode.DataException, `verifying arrow file header magic`)247 } else if !bytes.Equal([]byte(fileMagic), magic[:len(fileMagic)]) {248 return errors.New(`arrow file header magic mismatch`)249 }250 if magic, err := d.readBackward(len(fileMagic)); err != nil {251 return pgerror.Wrap(err, pgcode.DataException, `verifying arrow file footer magic`)252 } else if !bytes.Equal([]byte(fileMagic), magic) {253 return errors.New(`arrow file magic footer mismatch`)254 }255 footerSize, err := d.readBackward(4)256 if err != nil {257 return pgerror.Wrap(err, pgcode.DataException, `reading arrow file footer`)258 }259 footerBytes, err := d.readBackward(int(binary.LittleEndian.Uint32(footerSize)))260 if err != nil {261 return pgerror.Wrap(err, pgcode.DataException, `reading arrow file footer`)262 }263 footer := arrowserde.GetRootAsFooter(footerBytes, 0)264 if footer.Version() != arrowserde.MetadataVersionV1 {265 return errors.Errorf(`only arrow V1 is supported got %d`, footer.Version())266 }267 var block arrowserde.Block268 d.recordBatches = d.recordBatches[:0]269 for blockIdx := 0; blockIdx < footer.RecordBatchesLength(); blockIdx++ {270 footer.RecordBatches(&block, blockIdx)271 d.recordBatches = 
append(d.recordBatches, fileBlock{272 offset: block.Offset(),273 metadataLen: block.MetaDataLength(),274 bodyLen: block.BodyLength(),275 })276 }277 return nil278}279type countingWriter struct {280 wrapped io.Writer281 written int282}283func (w *countingWriter) Write(buf []byte) (int, error) {284 n, err := w.wrapped.Write(buf)285 w.written += n286 return n, err287}288func schema(fb *flatbuffers.Builder, typs []*types.T) flatbuffers.UOffsetT {289 fieldOffsets := make([]flatbuffers.UOffsetT, len(typs))290 for idx, typ := range typs {291 var fbTyp byte292 var fbTypOffset flatbuffers.UOffsetT293 switch typeconv.TypeFamilyToCanonicalTypeFamily(typ.Family()) {294 case types.BoolFamily:295 arrowserde.BoolStart(fb)296 fbTypOffset = arrowserde.BoolEnd(fb)297 fbTyp = arrowserde.TypeBool298 case types.BytesFamily, types.JsonFamily:299 arrowserde.BinaryStart(fb)300 fbTypOffset = arrowserde.BinaryEnd(fb)301 fbTyp = arrowserde.TypeBinary302 case types.IntFamily:303 switch typ.Width() {304 case 16:305 arrowserde.IntStart(fb)306 arrowserde.IntAddBitWidth(fb, 16)307 arrowserde.IntAddIsSigned(fb, 1)308 fbTypOffset = arrowserde.IntEnd(fb)309 fbTyp = arrowserde.TypeInt310 case 32:311 arrowserde.IntStart(fb)312 arrowserde.IntAddBitWidth(fb, 32)313 arrowserde.IntAddIsSigned(fb, 1)314 fbTypOffset = arrowserde.IntEnd(fb)315 fbTyp = arrowserde.TypeInt316 case 0, 64:317 arrowserde.IntStart(fb)318 arrowserde.IntAddBitWidth(fb, 64)319 arrowserde.IntAddIsSigned(fb, 1)320 fbTypOffset = arrowserde.IntEnd(fb)321 fbTyp = arrowserde.TypeInt322 default:323 panic(errors.Errorf(`unexpected int width %d`, typ.Width()))324 }325 case types.FloatFamily:326 arrowserde.FloatingPointStart(fb)327 arrowserde.FloatingPointAddPrecision(fb, arrowserde.PrecisionDOUBLE)328 fbTypOffset = arrowserde.FloatingPointEnd(fb)329 fbTyp = arrowserde.TypeFloatingPoint330 case types.DecimalFamily:331 // Decimals are marshaled into bytes, so we use binary headers.332 arrowserde.BinaryStart(fb)333 fbTypOffset = 
arrowserde.BinaryEnd(fb)334 fbTyp = arrowserde.TypeDecimal335 case types.TimestampTZFamily:336 // Timestamps are marshaled into bytes, so we use binary headers.337 arrowserde.BinaryStart(fb)338 fbTypOffset = arrowserde.BinaryEnd(fb)339 fbTyp = arrowserde.TypeTimestamp340 case types.IntervalFamily:341 // Intervals are marshaled into bytes, so we use binary headers.342 arrowserde.BinaryStart(fb)343 fbTypOffset = arrowserde.BinaryEnd(fb)344 fbTyp = arrowserde.TypeInterval345 case typeconv.DatumVecCanonicalTypeFamily:346 // Datums are marshaled into bytes, so we use binary headers.347 arrowserde.BinaryStart(fb)348 fbTypOffset = arrowserde.BinaryEnd(fb)349 fbTyp = arrowserde.TypeUtf8350 default:351 panic(errors.Errorf(`don't know how to map %s`, typ))352 }353 arrowserde.FieldStart(fb)354 arrowserde.FieldAddTypeType(fb, fbTyp)355 arrowserde.FieldAddType(fb, fbTypOffset)356 fieldOffsets[idx] = arrowserde.FieldEnd(fb)357 }358 arrowserde.SchemaStartFieldsVector(fb, len(typs))359 // flatbuffers adds everything back to front. Reverse iterate so they're in360 // the right order when they come out.361 for i := len(fieldOffsets) - 1; i >= 0; i-- {362 fb.PrependUOffsetT(fieldOffsets[i])363 }364 fields := fb.EndVector(len(typs))365 arrowserde.SchemaStart(fb)366 arrowserde.SchemaAddFields(fb, fields)367 return arrowserde.SchemaEnd(fb)368}369func schemaMessage(fb *flatbuffers.Builder, typs []*types.T) flatbuffers.UOffsetT {370 schemaOffset := schema(fb, typs)371 arrowserde.MessageStart(fb)372 arrowserde.MessageAddVersion(fb, arrowserde.MetadataVersionV1)373 arrowserde.MessageAddHeaderType(fb, arrowserde.MessageHeaderSchema)374 arrowserde.MessageAddHeader(fb, schemaOffset)375 return arrowserde.MessageEnd(fb)376}377func fileFooter(378 fb *flatbuffers.Builder, typs []*types.T, recordBatches []fileBlock,379) flatbuffers.UOffsetT {380 schemaOffset := schema(fb, typs)381 arrowserde.FooterStartRecordBatchesVector(fb, len(recordBatches))382 // flatbuffers adds everything back to front. 
Reverse iterate so they're in383 // the right order when they come out.384 for i := len(recordBatches) - 1; i >= 0; i-- {385 rb := recordBatches[i]386 arrowserde.CreateBlock(fb, rb.offset, rb.metadataLen, rb.bodyLen)387 }388 recordBatchesOffset := fb.EndVector(len(recordBatches))389 arrowserde.FooterStart(fb)390 arrowserde.FooterAddVersion(fb, arrowserde.MetadataVersionV1)391 arrowserde.FooterAddSchema(fb, schemaOffset)392 arrowserde.FooterAddRecordBatches(fb, recordBatchesOffset)393 return arrowserde.FooterEnd(fb)394}...

Full Screen

Full Screen

serializer.go

Source:serializer.go Github

copy

Full Screen

2import (3 "errors"4 "reflect"5)6type eventFunc = func() interface{}7type marshal func(v interface{}) ([]byte, error)8type unmarshal func(data []byte, v interface{}) error9// Serializer for json serializes10type Serializer struct {11 eventRegister map[string]eventFunc12 marshal marshal13 unmarshal unmarshal14}15// NewSerializer returns a json Handle16func NewSerializer(marshalF marshal, unmarshalF unmarshal) *Serializer {17 return &Serializer{18 eventRegister: make(map[string]eventFunc),19 marshal: marshalF,20 unmarshal: unmarshalF,21 }22}23var (24 // ErrAggregateNameMissing return if aggregate name is missing25 ErrAggregateNameMissing = errors.New("missing aggregate name")26 // ErrNoEventsToRegister return if no events to register27 ErrNoEventsToRegister = errors.New("no events to register")28 // ErrEventNameMissing return if Event name is missing29 ErrEventNameMissing = errors.New("missing event name")30)31// RegisterTypes events aggregate32func (h *Serializer) RegisterTypes(aggregate Aggregate, events ...eventFunc) error {33 typ := reflect.TypeOf(aggregate).Elem().Name()34 if typ == "" {35 return ErrAggregateNameMissing36 }37 if len(events) == 0 {38 return ErrNoEventsToRegister39 }40 for _, f := range events {41 event := f()42 reason := reflect.TypeOf(event).Elem().Name()43 if reason == "" {44 return ErrEventNameMissing45 }46 h.eventRegister[typ+"_"+reason] = f47 }48 return nil49}50// Type return a struct from the registry51func (h *Serializer) Type(typ, reason string) (eventFunc, bool) {52 d, ok := h.eventRegister[typ+"_"+reason]53 return d, ok54}55// Marshal pass the request to the under laying Marshal method56func (h *Serializer) Marshal(v interface{}) ([]byte, error) {57 return h.marshal(v)58}59// Unmarshal pass the request to the under laying Unmarshal method60func (h *Serializer) Unmarshal(data []byte, v interface{}) error {61 return h.unmarshal(data, v)62}...

Full Screen

Full Screen

typ

Using AI Code Generation

copy

Full Screen

1import "fmt"2type serializer interface {3 typ() string4}5type foo struct{}6func (f *foo) typ() string {7}8type bar struct{}9func (b *bar) typ() string {10}11func main() {12 s = &foo{}13 fmt.Println(s.typ())14 s = &bar{}15 fmt.Println(s.typ())16}

Full Screen

Full Screen

typ

Using AI Code Generation

copy

Full Screen

1func (s serializer) typ() string {2}3func (s serializer) typ() string {4}5func (s serializer) typ() string {6}7func (s serializer) typ() string {8}9func (s serializer) typ() string {10}11func (s serializer) typ() string {12}13func (s serializer) typ() string {14}15func (s serializer) typ() string {16}17func (s serializer) typ() string {18}19func (s serializer) typ() string {20}21func (s serializer) typ() string {22}23func (s serializer) typ() string {24}25func (s serializer) typ() string {26}27func (s serializer) typ() string {28}29func (s serializer) typ() string {30}31func (s serializer) typ() string {32}33func (

Full Screen

Full Screen

typ

Using AI Code Generation

copy

Full Screen

1s.serialize()2s.serialize()3s.serialize()4s.serialize()5s.serialize()6s.serialize()7s.serialize()8s.serialize()9s.serialize()10s.serialize()11s.serialize()12s.serialize()13s.serialize()14s.serialize()15s.serialize()16s.serialize()17s.serialize()18s.serialize()19s.serialize()20s.serialize()21s.serialize()22s.serialize()23s.serialize()24s.serialize()25s.serialize()26s.serialize()

Full Screen

Full Screen

typ

Using AI Code Generation

copy

Full Screen

1func main() {2 s = serializer{typ: "json"}3 s.serialize()4}5func main() {6 s = serializer{typ: "xml"}7 s.serialize()8}9func main() {10 s = serializer{typ: "yaml"}11 s.serialize()12}13func main() {14 s = serializer{typ: "toml"}15 s.serialize()16}17func main() {18 s = serializer{typ: "csv"}19 s.serialize()20}21func main() {22 s = serializer{typ: "xml"}23 s.serialize()24}25func main() {26 s = serializer{typ: "yaml"}27 s.serialize()28}29func main() {30 s = serializer{typ: "toml"}31 s.serialize()32}33func main() {34 s = serializer{typ: "csv"}35 s.serialize()36}37func main() {38 s = serializer{typ: "xml"}39 s.serialize()40}41func main() {42 s = serializer{typ: "yaml"}43 s.serialize()44}45func main() {46 s = serializer{typ: "toml"}47 s.serialize()48}49func main()

Full Screen

Full Screen

typ

Using AI Code Generation

copy

Full Screen

1serializer = new serializer();2serializer.typ(2);3serializer.typ(3);4serializer.typ(4);5serializer.typ(5);6serializer = new serializer();7serializer.typ(2);8serializer.typ(3);9serializer.typ(4);10serializer.typ(5);11type Serializer interface {12 typ(int)13}14func New() Serializer {15 return &serializer{}16}17type serializer struct {18}19func (s *serializer) typ(i int) {20}21import (22func main() {23 s := serializer.New()24 s.typ(2)25 s.typ(3)26 s.typ(4)27 s.typ(5)28}29import (30func main() {31 s := serializer.New()32 s.typ(2)33 s.typ(3)34 s.typ(4)35 s.typ(5)36}

Full Screen

Full Screen

typ

Using AI Code Generation

copy

Full Screen

1import (2type serializer struct {3}4func (s *serializer) serialize() {5 fmt.Println("serializing...")6}7func (s *serializer) deserialize() {8 fmt.Println("deserializing...")9}10func main() {11 s := serializer{typ: "json"}12 s.serialize()13 s.deserialize()14}15import (16type serializer struct {17}18func (s *serializer) serialize() {19 fmt.Println("serializing...")20}21func (s *serializer) deserialize() {22 fmt.Println("deserializing...")23}24type jsonSerializer struct {25}26func main() {27 s := jsonSerializer{}28 s.serialize()29 s.deserialize()30}31import (32type serializer struct {33}34func (s *serializer) serialize() {35 fmt.Println("serializing...")36}37func (s *serializer) deserialize() {38 fmt.Println("deserializing...")39}40type jsonSerializer struct {41}42func (j *jsonSerializer) serialize() {43 fmt.Println("serializing json...")44}45func main() {46 s := jsonSerializer{}47 s.serialize()48 s.deserialize()49}

Full Screen

Full Screen

typ

Using AI Code Generation

copy

Full Screen

1import "fmt"2func main() {3 s := serializer{typ: "json"}4 s.dump()5}6import "fmt"7type serializer struct {8}9func (s *serializer) dump() {10 fmt.Println(s.typ)11}12func main() {13 s := serializer{typ: "json"}14 s.dump()15}16import "fmt"17type serializer struct {18}19func (s *serializer) dump() {20 fmt.Println(s.typ)21}22func main() {23 s := serializer{typ: "json"}24 s.dump()25}26import "fmt"27type serializer struct {28}29func (s *serializer) dump() {30 fmt.Println(s.typ)31}32func main() {33 s := serializer{typ: "json"}34 s.dump()35}36import "fmt"37type serializer struct {38}39func (s *serializer) dump() {40 fmt.Println(s.typ)41}42func main() {43 s := serializer{typ: "json"}44 s.dump()45}46import "fmt"47type serializer struct {48}49func (s *serializer) dump() {50 fmt.Println(s.typ)51}52func main() {53 s := serializer{typ: "json"}54 s.dump()55}56import "fmt"57type serializer struct {58}59func (s *serializer) dump() {60 fmt.Println(s.typ)61}62func main() {63 s := serializer{typ: "json"}64 s.dump()65}66import "fmt"

Full Screen

Full Screen

typ

Using AI Code Generation

copy

Full Screen

1import (2type serializer struct {3}4func (s serializer) typ() string {5}6func main() {7 s := serializer{}8 t := reflect.TypeOf(s)9 fmt.Println(t)10 fmt.Println(t.MethodByName("typ"))11 fmt.Println(t.MethodByName("typ").Func)12 fmt.Println(t.MethodByName("typ").Func.Call([]reflect.Value{reflect.ValueOf(s)}))13 fmt.Println(t.MethodByName("typ").Func.Call([]reflect.Value{reflect.ValueOf(s)})[0].String())14 fmt.Println(t.MethodByName("typ").Func.Call([]reflect.Value{reflect.ValueOf(s)})[0].String() == "s")15}16{typ 0x5c2c0 0 [0] false}

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run Syzkaller automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

NotHelpful