
helper.go 66KB

  1. // Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
  2. // Use of this source code is governed by a MIT license found in the LICENSE file.
  3. package codec
  4. // Contains code shared by both encode and decode.
  5. // Some shared ideas around encoding/decoding
  6. // ------------------------------------------
  7. //
  8. // If an interface{} is passed, we first do a type assertion to see if it is
  9. // a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
  10. //
  11. // If we start with a reflect.Value, we are already in reflect.Value land and
  12. // will try to grab the function for the underlying Type and directly call that function.
  13. // This is more performant than calling reflect.Value.Interface().
  14. //
  15. // This still helps us bypass many layers of reflection, and gives the best performance.
  16. //
  17. // Containers
  18. // ------------
  19. // Containers in the stream are either associative arrays (key-value pairs) or
  20. // regular arrays (indexed by incrementing integers).
  21. //
  22. // Some streams support indefinite-length containers, and use a breaking
  23. // byte-sequence to denote that the container has come to an end.
  24. //
  25. // Some streams also are text-based, and use explicit separators to denote the
  26. // end/beginning of different values.
  27. //
  28. // During encode, we use a high-level condition to determine how to iterate through
  29. // the container. That decision is based on whether the container is text-based (with
  30. // separators) or binary (without separators). If binary, we do not even call the
  31. // encoding of separators.
  32. //
  33. // During decode, we use a different high-level condition to determine how to iterate
  34. // through the containers. That decision is based on whether the stream contained
  35. // a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
  36. // it has to be binary, and we do not even try to read separators.
  37. //
  38. // Philosophy
  39. // ------------
  40. // On decode, this codec will update containers appropriately:
  41. // - If struct, update fields from stream into fields of struct.
  42. // If field in stream not found in struct, handle appropriately (based on option).
  43. // If a struct field has no corresponding value in the stream, leave it AS IS.
  44. // If nil in stream, set value to nil/zero value.
  45. // - If map, update map from stream.
  46. // If the stream value is NIL, set the map to nil.
  47. // - If slice, try to update up to the length of the array in the stream.
  48. // If the container length is less than the stream array length,
  49. // and the container cannot be expanded, handle appropriately (based on option).
  50. // This means you can decode a 4-element stream array into a 1-element array.
  51. //
  52. // ------------------------------------
  53. // On encode, the user can specify omitEmpty. This means that the value will be omitted
  54. // if it is the zero value. A problem may occur during decode, where omitted values do not affect
  55. // the value being decoded into. This means that if decoding into a struct with an
  56. // int field with current value=5, and the field is omitted in the stream, then after
  57. // decoding, the value will still be 5 (not 0).
  58. // omitEmpty only works if you guarantee that you always decode into zero-values.
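//
// For illustration, a minimal sketch of that effect (assuming `var jh JsonHandle`
// is configured; the JSON payload below is hypothetical):
//
//	type T struct{ A, B int }
//	v := T{A: 5, B: 7}
//	_ = NewDecoderBytes([]byte(`{"B":9}`), &jh).Decode(&v)
//	// v == T{A: 5, B: 9}: the omitted field A keeps its prior value of 5.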
  59. //
  60. // ------------------------------------
  61. // We could have truncated a map to remove keys not available in the stream,
  62. // or set values in the struct which are not in the stream to their zero values.
  63. // We decided against it because there is no efficient way to do it.
  64. // We may introduce it as an option later.
  65. // However, that will require enabling it for both runtime and code generation modes.
  66. //
  67. // To support truncate, we need to do 2 passes over the container:
  68. // map
  69. // - first collect all keys (e.g. in k1)
  70. // - for each key in stream, mark k1 that the key should not be removed
  71. // - after updating map, do second pass and call delete for all keys in k1 which are not marked
  72. // struct:
  73. // - for each field, track the *typeInfo s1
  74. // - iterate through all s1, and for each one not marked, set value to zero
  75. // - this involves checking the possible anonymous fields which are nil ptrs.
  76. // too much work.
  77. //
  78. // ------------------------------------------
  79. // Error Handling is done within the library using panic.
  80. //
  81. // This way, the code doesn't have to keep checking if an error has happened,
  82. // and we don't have to keep sending the error value along with each call
  83. // or storing it in the En|Decoder and checking it constantly along the way.
  84. //
  85. // The disadvantage is that small functions which use panics cannot be inlined.
  86. // The code accounts for that by only using panics behind an interface;
  87. // since interface calls cannot be inlined anyway, the lost inlining is irrelevant.
  88. //
  89. // We considered storing the error in the En|Decoder:
  90. // - once it has its err field set, it cannot be used again.
  91. // - panicking will be optional, controlled by a const flag.
  92. // - code should always check error first and return early.
  93. // We eventually decided against it as it makes the code clumsier to always
  94. // check for these error conditions.
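//
// For illustration, the general shape of that recovery (a sketch only, not the
// exact helper this package uses):
//
//	func callAndRecover(fn func()) (err error) {
//		defer func() {
//			if recoverPanicToErr {
//				if x := recover(); x != nil {
//					err = fmt.Errorf("%v", x) // wrap the recovered panic value as an error
//				}
//			}
//		}()
//		fn() // may panic deep inside encode/decode
//		return
//	}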
  95. import (
  96. "bytes"
  97. "encoding"
  98. "encoding/binary"
  99. "errors"
  100. "fmt"
  101. "io"
  102. "math"
  103. "reflect"
  104. "sort"
  105. "strconv"
  106. "strings"
  107. "sync"
  108. "time"
  109. )
  110. const (
  111. scratchByteArrayLen = 32
  112. // initCollectionCap = 16 // 32 is defensive. 16 is preferred.
  113. // Support encoding.(Binary|Text)(Unm|M)arshaler.
  114. // This constant flag will enable or disable it.
  115. supportMarshalInterfaces = true
  116. // for debugging, set this to false, to catch panic traces.
  117. // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
  118. recoverPanicToErr = true
  119. // arrayCacheLen is the length of the cache used in encoder or decoder for
  120. // allowing zero-alloc initialization.
  121. arrayCacheLen = 8
  122. // size of the cacheline: defaulting to value for archs: amd64, arm64, 386
  123. // should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
  124. cacheLineSize = 64
  125. wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
  126. wordSize = wordSizeBits / 8
  127. maxLevelsEmbedding = 14 // use this, so structFieldInfo fits into 8 bytes
  128. )
  129. var (
  130. oneByteArr = [1]byte{0}
  131. zeroByteSlice = oneByteArr[:0:0]
  132. )
  133. var codecgen bool
  134. var refBitset bitset32
  135. var pool pooler
  136. var panicv panicHdl
  137. func init() {
  138. pool.init()
  139. refBitset.set(byte(reflect.Map))
  140. refBitset.set(byte(reflect.Ptr))
  141. refBitset.set(byte(reflect.Func))
  142. refBitset.set(byte(reflect.Chan))
  143. }
  144. type charEncoding uint8
  145. const (
  146. cRAW charEncoding = iota
  147. cUTF8
  148. cUTF16LE
  149. cUTF16BE
  150. cUTF32LE
  151. cUTF32BE
  152. )
  153. // valueType is the stream type
  154. type valueType uint8
  155. const (
  156. valueTypeUnset valueType = iota
  157. valueTypeNil
  158. valueTypeInt
  159. valueTypeUint
  160. valueTypeFloat
  161. valueTypeBool
  162. valueTypeString
  163. valueTypeSymbol
  164. valueTypeBytes
  165. valueTypeMap
  166. valueTypeArray
  167. valueTypeTime
  168. valueTypeExt
  169. // valueTypeInvalid = 0xff
  170. )
  171. var valueTypeStrings = [...]string{
  172. "Unset",
  173. "Nil",
  174. "Int",
  175. "Uint",
  176. "Float",
  177. "Bool",
  178. "String",
  179. "Symbol",
  180. "Bytes",
  181. "Map",
  182. "Array",
  183. "Timestamp",
  184. "Ext",
  185. }
  186. func (x valueType) String() string {
  187. if int(x) < len(valueTypeStrings) {
  188. return valueTypeStrings[x]
  189. }
  190. return strconv.FormatInt(int64(x), 10)
  191. }
  192. type seqType uint8
  193. const (
  194. _ seqType = iota
  195. seqTypeArray
  196. seqTypeSlice
  197. seqTypeChan
  198. )
  199. // note that containerMapStart and containerArrayStart are not sent.
  200. // This is because the ReadXXXStart and EncodeXXXStart already do these.
  201. type containerState uint8
  202. const (
  203. _ containerState = iota
  204. containerMapStart // slot left open, since Driver method already covers it
  205. containerMapKey
  206. containerMapValue
  207. containerMapEnd
  208. containerArrayStart // slot left open, since Driver methods already cover it
  209. containerArrayElem
  210. containerArrayEnd
  211. )
  212. // // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
  213. // type sfiIdx struct {
  214. // name string
  215. // index int
  216. // }
  217. // do not recurse if a containing type refers to an embedded type
  218. // which refers back to its containing type (via a pointer).
  219. // The second time this back-reference happens, break out,
  220. // so as not to cause an infinite loop.
  221. const rgetMaxRecursion = 2
  222. // Anecdotally, we believe most types have <= 12 fields.
  223. // - even Java's PMD rules set TooManyFields threshold to 15.
  224. // However, Go has embedded fields, which should be regarded as
  225. // top level, possibly doubling or tripling the effective field count.
  226. // In addition, we don't want to keep creating transient arrays,
  227. // especially for the sfi index tracking, and the etypes tracking.
  228. //
  229. // So - try to keep typeInfoLoadArray within 2K bytes
  230. const (
  231. typeInfoLoadArraySfisLen = 16
  232. typeInfoLoadArraySfiidxLen = 8 * 112
  233. typeInfoLoadArrayEtypesLen = 12
  234. typeInfoLoadArrayBLen = 8 * 4
  235. )
  236. type typeInfoLoad struct {
  237. // fNames []string
  238. // encNames []string
  239. etypes []uintptr
  240. sfis []structFieldInfo
  241. }
  242. type typeInfoLoadArray struct {
  243. // fNames [typeInfoLoadArrayLen]string
  244. // encNames [typeInfoLoadArrayLen]string
  245. sfis [typeInfoLoadArraySfisLen]structFieldInfo
  246. sfiidx [typeInfoLoadArraySfiidxLen]byte
  247. etypes [typeInfoLoadArrayEtypesLen]uintptr
  248. b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names
  249. }
  250. // mirror json.Marshaler and json.Unmarshaler here,
  251. // so we don't import the encoding/json package
  252. type jsonMarshaler interface {
  253. MarshalJSON() ([]byte, error)
  254. }
  255. type jsonUnmarshaler interface {
  256. UnmarshalJSON([]byte) error
  257. }
  258. type isZeroer interface {
  259. IsZero() bool
  260. }
  261. type codecError struct {
  262. name string
  263. err interface{}
  264. }
  265. func (e codecError) Cause() error {
  266. switch xerr := e.err.(type) {
  267. case nil:
  268. return nil
  269. case error:
  270. return xerr
  271. case string:
  272. return errors.New(xerr)
  273. case fmt.Stringer:
  274. return errors.New(xerr.String())
  275. default:
  276. return fmt.Errorf("%v", e.err)
  277. }
  278. }
  279. func (e codecError) Error() string {
  280. return fmt.Sprintf("%s error: %v", e.name, e.err)
  281. }
  282. // type byteAccepter func(byte) bool
  283. var (
  284. bigen = binary.BigEndian
  285. structInfoFieldName = "_struct"
  286. mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
  287. mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
  288. intfSliceTyp = reflect.TypeOf([]interface{}(nil))
  289. intfTyp = intfSliceTyp.Elem()
  290. reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()
  291. stringTyp = reflect.TypeOf("")
  292. timeTyp = reflect.TypeOf(time.Time{})
  293. rawExtTyp = reflect.TypeOf(RawExt{})
  294. rawTyp = reflect.TypeOf(Raw{})
  295. uintptrTyp = reflect.TypeOf(uintptr(0))
  296. uint8Typ = reflect.TypeOf(uint8(0))
  297. uint8SliceTyp = reflect.TypeOf([]uint8(nil))
  298. uintTyp = reflect.TypeOf(uint(0))
  299. intTyp = reflect.TypeOf(int(0))
  300. mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
  301. binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
  302. binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
  303. textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
  304. textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
  305. jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
  306. jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
  307. selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
  308. missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem()
  309. iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem()
  310. uint8TypId = rt2id(uint8Typ)
  311. uint8SliceTypId = rt2id(uint8SliceTyp)
  312. rawExtTypId = rt2id(rawExtTyp)
  313. rawTypId = rt2id(rawTyp)
  314. intfTypId = rt2id(intfTyp)
  315. timeTypId = rt2id(timeTyp)
  316. stringTypId = rt2id(stringTyp)
  317. mapStrIntfTypId = rt2id(mapStrIntfTyp)
  318. mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
  319. intfSliceTypId = rt2id(intfSliceTyp)
  320. // mapBySliceTypId = rt2id(mapBySliceTyp)
  321. intBitsize = uint8(intTyp.Bits())
  322. uintBitsize = uint8(uintTyp.Bits())
  323. bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
  324. bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
  325. chkOvf checkOverflow
  326. errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo")
  327. )
  328. var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
  329. var immutableKindsSet = [32]bool{
  330. // reflect.Invalid: ,
  331. reflect.Bool: true,
  332. reflect.Int: true,
  333. reflect.Int8: true,
  334. reflect.Int16: true,
  335. reflect.Int32: true,
  336. reflect.Int64: true,
  337. reflect.Uint: true,
  338. reflect.Uint8: true,
  339. reflect.Uint16: true,
  340. reflect.Uint32: true,
  341. reflect.Uint64: true,
  342. reflect.Uintptr: true,
  343. reflect.Float32: true,
  344. reflect.Float64: true,
  345. reflect.Complex64: true,
  346. reflect.Complex128: true,
  347. // reflect.Array
  348. // reflect.Chan
  349. // reflect.Func: true,
  350. // reflect.Interface
  351. // reflect.Map
  352. // reflect.Ptr
  353. // reflect.Slice
  354. reflect.String: true,
  355. // reflect.Struct
  356. // reflect.UnsafePointer
  357. }
  358. // Selfer defines methods by which a value can encode or decode itself.
  359. //
  360. // Any type which implements Selfer will be able to encode or decode itself.
  361. // Consequently, during (en|de)code, this takes precedence over
  362. // (text|binary)(M|Unm)arshal or extension support.
  363. //
  364. // Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
  365. // This is because, during each decode, we first check whether the next set of bytes
  366. // represents nil, and if so, we just set the value to nil.
  367. type Selfer interface {
  368. CodecEncodeSelf(*Encoder)
  369. CodecDecodeSelf(*Decoder)
  370. }
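
// For illustration only: a minimal sketch of a type implementing Selfer.
// The type below is hypothetical (not part of this package's API); it encodes
// itself as a bare int64, which never starts with nil in any supported format.
type selferExample struct{ ID int64 }

// CodecEncodeSelf encodes the value directly via the Encoder.
func (s *selferExample) CodecEncodeSelf(e *Encoder) { e.MustEncode(s.ID) }

// CodecDecodeSelf decodes the value directly via the Decoder.
func (s *selferExample) CodecDecodeSelf(d *Decoder) { d.MustDecode(&s.ID) }
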
  371. // MissingFieldPair is a convenience value composed of the field name and the value of the field.
  372. type MissingFieldPair struct {
  373. Field string
  374. Value interface{}
  375. }
  376. // MissingFielder defines the interface allowing structs to internally decode or encode
  377. // values which do not map to struct fields.
  378. //
  379. // We expect that this interface is bound to a pointer type (so the mutation function works).
  380. //
  381. // A use-case is if a version of a type unexports a field, but you want compatibility between
  382. // both versions during encoding and decoding.
  383. //
  384. // Note that the interface is completely ignored during codecgen.
  385. type MissingFielder interface {
  386. // CodecMissingField is called to set a missing field and value pair.
  387. //
  388. // It returns true if the missing field was set on the struct.
  389. CodecMissingField(field []byte, value interface{}) bool
  390. // CodecMissingFields returns the set of fields which are not struct fields
  391. CodecMissingFields() []MissingFieldPair
  392. }
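
// For illustration only: a minimal sketch of a struct implementing MissingFielder.
// The type is hypothetical (not part of this package); it captures unknown stream
// fields in a side map so they can round-trip on a later encode.
type missingFielderExample struct {
	Name  string
	extra map[string]interface{} // fields seen in the stream but not declared on the struct
}

// CodecMissingField records an unknown field/value pair and reports it as handled.
func (m *missingFielderExample) CodecMissingField(field []byte, value interface{}) bool {
	if m.extra == nil {
		m.extra = make(map[string]interface{})
	}
	m.extra[string(field)] = value
	return true
}

// CodecMissingFields returns the previously captured non-struct fields.
func (m *missingFielderExample) CodecMissingFields() []MissingFieldPair {
	pairs := make([]MissingFieldPair, 0, len(m.extra))
	for k, v := range m.extra {
		pairs = append(pairs, MissingFieldPair{Field: k, Value: v})
	}
	return pairs
}
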
  393. // MapBySlice is a tag interface that denotes that the wrapped slice should encode as a map in the stream.
  394. // The slice contains a sequence of key-value pairs.
  395. // This affords storing a map in a specific sequence in the stream.
  396. //
  397. // Example usage:
  398. // type T1 []string // or []int or []Point or any other "slice" type
  399. // func (T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
  400. // type T2 struct { KeyValues T1 }
  401. //
  402. // var kvs = []string{"one", "1", "two", "2", "three", "3"}
  403. // var v2 = T2{ KeyValues: T1(kvs) }
  404. // // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
  405. //
  406. // The support of MapBySlice affords the following:
  407. // - A slice type which implements MapBySlice will be encoded as a map
  408. // - A slice can be decoded from a map in the stream
  409. // - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
  410. type MapBySlice interface {
  411. MapBySlice()
  412. }
  413. // BasicHandle encapsulates the common options and extension functions.
  414. //
  415. // Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
  416. type BasicHandle struct {
  417. // BasicHandle is always a part of a different type.
  418. // It doesn't have to fit into its own cache lines.
  419. // TypeInfos is used to get the type info for any type.
  420. //
  421. // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
  422. TypeInfos *TypeInfos
  423. // Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
  424. // If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
  425. // These slices are used all the time, so keep as slices (not pointers).
  426. extHandle
  427. intf2impls
  428. RPCOptions
  429. // ---- cache line
  430. DecodeOptions
  431. // ---- cache line
  432. EncodeOptions
  433. // noBuiltInTypeChecker
  434. }
  435. func (x *BasicHandle) getBasicHandle() *BasicHandle {
  436. return x
  437. }
  438. func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
  439. if x.TypeInfos == nil {
  440. return defTypeInfos.get(rtid, rt)
  441. }
  442. return x.TypeInfos.get(rtid, rt)
  443. }
  444. // Handle is the interface for a specific encoding format.
  445. //
  446. // Typically, a Handle is pre-configured before first time use,
  447. // and not modified while in use. Such a pre-configured Handle
  448. // is safe for concurrent access.
  449. type Handle interface {
  450. Name() string
  451. getBasicHandle() *BasicHandle
  452. recreateEncDriver(encDriver) bool
  453. newEncDriver(w *Encoder) encDriver
  454. newDecDriver(r *Decoder) decDriver
  455. isBinary() bool
  456. hasElemSeparators() bool
  457. // IsBuiltinType(rtid uintptr) bool
  458. }
  459. // Raw represents raw formatted bytes.
  460. // We "blindly" store it during encode and retrieve the raw bytes during decode.
  461. // Note: it is dangerous during encode, so we may gate the behaviour
  462. // behind an Encode flag which must be explicitly set.
  463. type Raw []byte
  464. // RawExt represents raw unprocessed extension data.
  465. // Some codecs will decode extension data as a *RawExt
  466. // if there is no registered extension for the tag.
  467. //
  468. // Only one of Data or Value is nil.
  469. // If Data is nil, then the content of the RawExt is in the Value.
  470. type RawExt struct {
  471. Tag uint64
  472. // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
  473. // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
  474. Data []byte
  475. // Value represents the extension, if Data is nil.
  476. // Value is used by codecs (e.g. cbor, json) which leverage the format to do
  477. // custom serialization of the types.
  478. Value interface{}
  479. }
  480. // BytesExt handles custom (de)serialization of types to/from []byte.
  481. // It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
  482. type BytesExt interface {
  483. // WriteExt converts a value to a []byte.
  484. //
  485. // Note: v is a pointer iff the registered extension type is a struct or array kind.
  486. WriteExt(v interface{}) []byte
  487. // ReadExt updates a value from a []byte.
  488. //
  489. // Note: dst is always a pointer kind to the registered extension type.
  490. ReadExt(dst interface{}, src []byte)
  491. }
  492. // InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
  493. // The Encoder or Decoder will then handle the further (de)serialization of that known type.
  494. //
  495. // It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
  496. type InterfaceExt interface {
  497. // ConvertExt converts a value into a simpler interface for easy encoding
  498. // e.g. convert time.Time to int64.
  499. //
  500. // Note: v is a pointer iff the registered extension type is a struct or array kind.
  501. ConvertExt(v interface{}) interface{}
  502. // UpdateExt updates a value from a simpler interface for easy decoding
  503. // e.g. convert int64 to time.Time.
  504. //
  505. // Note: dst is always a pointer kind to the registered extension type.
  506. UpdateExt(dst interface{}, src interface{})
  507. }
  508. // Ext handles custom (de)serialization of custom types / extensions.
  509. type Ext interface {
  510. BytesExt
  511. InterfaceExt
  512. }
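
// For illustration only: a sketch of an InterfaceExt for a hypothetical struct type,
// converting it to/from an int64 (seconds since epoch) so format-aware codecs can
// encode it natively. Per the contracts above, v and dst arrive as pointers here
// because the registered type is a struct kind. Registration might look like
// (the handle and tag value are illustrative):
//
//	var ch CborHandle
//	_ = ch.SetInterfaceExt(reflect.TypeOf(unixStamp{}), 78, unixStampExt{})
type unixStamp struct{ t time.Time }

type unixStampExt struct{}

// ConvertExt converts the value into a simpler interface (int64 seconds) for encoding.
func (unixStampExt) ConvertExt(v interface{}) interface{} {
	return v.(*unixStamp).t.Unix()
}

// UpdateExt rebuilds the value in place from the simpler decoded representation.
func (unixStampExt) UpdateExt(dst interface{}, src interface{}) {
	// the concrete type of src depends on the codec; handle the common integer forms
	switch s := src.(type) {
	case int64:
		dst.(*unixStamp).t = time.Unix(s, 0)
	case uint64:
		dst.(*unixStamp).t = time.Unix(int64(s), 0)
	}
}
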
  513. // addExtWrapper is a wrapper implementation to support former AddExt exported method.
  514. type addExtWrapper struct {
  515. encFn func(reflect.Value) ([]byte, error)
  516. decFn func(reflect.Value, []byte) error
  517. }
  518. func (x addExtWrapper) WriteExt(v interface{}) []byte {
  519. bs, err := x.encFn(reflect.ValueOf(v))
  520. if err != nil {
  521. panic(err)
  522. }
  523. return bs
  524. }
  525. func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
  526. if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
  527. panic(err)
  528. }
  529. }
  530. func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
  531. return x.WriteExt(v)
  532. }
  533. func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
  534. x.ReadExt(dest, v.([]byte))
  535. }
  536. type extWrapper struct {
  537. BytesExt
  538. InterfaceExt
  539. }
  540. type bytesExtFailer struct{}
  541. func (bytesExtFailer) WriteExt(v interface{}) []byte {
  542. panicv.errorstr("BytesExt.WriteExt is not supported")
  543. return nil
  544. }
  545. func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
  546. panicv.errorstr("BytesExt.ReadExt is not supported")
  547. }
  548. type interfaceExtFailer struct{}
  549. func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
  550. panicv.errorstr("InterfaceExt.ConvertExt is not supported")
  551. return nil
  552. }
  553. func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
  554. panicv.errorstr("InterfaceExt.UpdateExt is not supported")
  555. }
  556. type binaryEncodingType struct{}
  557. func (binaryEncodingType) isBinary() bool { return true }
  558. type textEncodingType struct{}
  559. func (textEncodingType) isBinary() bool { return false }
  560. // noBuiltInTypes is embedded into many types which do not support builtins
  561. // e.g. msgpack, simple, cbor.
  562. // type noBuiltInTypeChecker struct{}
  563. // func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false }
  564. // type noBuiltInTypes struct{ noBuiltInTypeChecker }
  565. type noBuiltInTypes struct{}
  566. func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
  567. func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
  568. // type noStreamingCodec struct{}
  569. // func (noStreamingCodec) CheckBreak() bool { return false }
  570. // func (noStreamingCodec) hasElemSeparators() bool { return false }
  571. type noElemSeparators struct{}
  572. func (noElemSeparators) hasElemSeparators() (v bool) { return }
  573. func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return }
  574. // bigenHelper.
  575. // Users must already slice the x completely, because we will not reslice.
  576. type bigenHelper struct {
  577. x []byte // must be correctly sliced to appropriate len. slicing is a cost.
  578. w encWriter
  579. }
  580. func (z bigenHelper) writeUint16(v uint16) {
  581. bigen.PutUint16(z.x, v)
  582. z.w.writeb(z.x)
  583. }
  584. func (z bigenHelper) writeUint32(v uint32) {
  585. bigen.PutUint32(z.x, v)
  586. z.w.writeb(z.x)
  587. }
  588. func (z bigenHelper) writeUint64(v uint64) {
  589. bigen.PutUint64(z.x, v)
  590. z.w.writeb(z.x)
  591. }
  592. type extTypeTagFn struct {
  593. rtid uintptr
  594. rtidptr uintptr
  595. rt reflect.Type
  596. tag uint64
  597. ext Ext
  598. _ [1]uint64 // padding
  599. }
  600. type extHandle []extTypeTagFn
  601. // AddExt registers an encode and decode function for a reflect.Type.
  602. // To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
  603. //
  604. // Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
  605. func (o *extHandle) AddExt(rt reflect.Type, tag byte,
  606. encfn func(reflect.Value) ([]byte, error),
  607. decfn func(reflect.Value, []byte) error) (err error) {
  608. if encfn == nil || decfn == nil {
  609. return o.SetExt(rt, uint64(tag), nil)
  610. }
  611. return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
  612. }
  613. // SetExt will set the extension for a tag and reflect.Type.
  614. // Note that the type must be a named type, and specifically not a pointer or interface.
  615. // An error is returned if that is not honored.
  616. // To deregister an ext, call SetExt with a nil Ext.
  617. //
  618. // Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
  619. func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
  620. // o is a pointer, because we may need to initialize it
  621. rk := rt.Kind()
  622. for rk == reflect.Ptr {
  623. rt = rt.Elem()
  624. rk = rt.Kind()
  625. }
  626. if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
  627. return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
  628. }
  629. rtid := rt2id(rt)
  630. switch rtid {
  631. case timeTypId, rawTypId, rawExtTypId:
  632. // all natively supported type, so cannot have an extension
  633. return // TODO: should we silently ignore, or return an error???
  634. }
  635. // if o == nil {
  636. // return errors.New("codec.Handle.SetExt: extHandle not initialized")
  637. // }
  638. o2 := *o
  639. // if o2 == nil {
  640. // return errors.New("codec.Handle.SetExt: extHandle not initialized")
  641. // }
  642. for i := range o2 {
  643. v := &o2[i]
  644. if v.rtid == rtid {
  645. v.tag, v.ext = tag, ext
  646. return
  647. }
  648. }
  649. rtidptr := rt2id(reflect.PtrTo(rt))
  650. *o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}})
  651. return
  652. }
  653. func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) {
  654. for i := range o {
  655. v = &o[i]
  656. if v.rtid == rtid || v.rtidptr == rtid {
  657. return
  658. }
  659. }
  660. return nil
  661. }
  662. func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
  663. for i := range o {
  664. v = &o[i]
  665. if v.tag == tag {
  666. return
  667. }
  668. }
  669. return nil
  670. }
  671. type intf2impl struct {
  672. rtid uintptr // for intf
  673. impl reflect.Type
  674. // _ [1]uint64 // padding // not-needed, as *intf2impl is never returned.
  675. }
  676. type intf2impls []intf2impl
  677. // Intf2Impl maps an interface to an implementing type.
  678. // This allows us to support inferring the concrete type
  679. // and populating it when passed an interface.
  680. // e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
  681. //
  682. // Passing a nil impl will clear the mapping.
  683. func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
  684. if impl != nil && !impl.Implements(intf) {
  685. return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
  686. }
  687. rtid := rt2id(intf)
  688. o2 := *o
  689. for i := range o2 {
  690. v := &o2[i]
  691. if v.rtid == rtid {
  692. v.impl = impl
  693. return
  694. }
  695. }
  696. *o = append(o2, intf2impl{rtid, impl})
  697. return
  698. }
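
// For illustration only: a hypothetical configuration that maps io.Reader to *bytes.Buffer,
// so decoding into an io.Reader-typed value materializes a bytes.Buffer.
func exampleIntf2Impl(h *BasicHandle) error {
	return h.Intf2Impl(
		reflect.TypeOf((*io.Reader)(nil)).Elem(), // the interface type
		reflect.TypeOf((*bytes.Buffer)(nil)),     // the concrete implementing type (*bytes.Buffer)
	)
}
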
  699. func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
  700. for i := range o {
  701. v := &o[i]
  702. if v.rtid == rtid {
  703. if v.impl == nil {
  704. return
  705. }
  706. if v.impl.Kind() == reflect.Ptr {
  707. return reflect.New(v.impl.Elem())
  708. }
  709. return reflect.New(v.impl).Elem()
  710. }
  711. }
  712. return
  713. }
  714. type structFieldInfoFlag uint8
  715. const (
  716. _ structFieldInfoFlag = 1 << iota
  717. structFieldInfoFlagReady
  718. structFieldInfoFlagOmitEmpty
  719. )
  720. func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) {
  721. *x = *x | f
  722. }
  723. func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) {
  724. *x = *x &^ f
  725. }
  726. func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool {
  727. return x&f != 0
  728. }
  729. func (x structFieldInfoFlag) omitEmpty() bool {
  730. return x.flagGet(structFieldInfoFlagOmitEmpty)
  731. }
  732. func (x structFieldInfoFlag) ready() bool {
  733. return x.flagGet(structFieldInfoFlagReady)
  734. }
  735. type structFieldInfo struct {
  736. encName string // encode name
  737. fieldName string // field name
  738. is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct
  739. nis uint8 // num levels of embedding. if 1, then it's not embedded.
  740. encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers
  741. structFieldInfoFlag
  742. _ [1]byte // padding
  743. }
  744. func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
  745. if v, valid := si.field(v, false); valid {
  746. v.Set(reflect.Zero(v.Type()))
  747. }
  748. }
  749. // field returns the field of the struct.
  750. // If an anonymous (embedded) field along the path cannot be resolved, it returns an invalid reflect.Value.
  751. func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) {
  752. // replicate FieldByIndex
  753. for i, x := range si.is {
  754. if uint8(i) == si.nis {
  755. break
  756. }
  757. if v, valid = baseStructRv(v, update); !valid {
  758. return
  759. }
  760. v = v.Field(int(x))
  761. }
  762. return v, true
  763. }
  764. // func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value {
  765. // v, _ = si.field(v, update)
  766. // return v
  767. // }
  768. func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
  769. keytype = valueTypeString // default
  770. if stag == "" {
  771. return
  772. }
  773. for i, s := range strings.Split(stag, ",") {
  774. if i == 0 {
  775. } else {
  776. switch s {
  777. case "omitempty":
  778. omitEmpty = true
  779. case "toarray":
  780. toArray = true
  781. case "int":
  782. keytype = valueTypeInt
  783. case "uint":
  784. keytype = valueTypeUint
  785. case "float":
  786. keytype = valueTypeFloat
  787. // case "bool":
  788. // keytype = valueTypeBool
  789. case "string":
  790. keytype = valueTypeString
  791. }
  792. }
  793. }
  794. return
  795. }
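
// For illustration: parseStructInfo consumes the tag on the special "_struct" field
// (see structInfoFieldName above). A sketch of a struct opting in:
//
//	type T struct {
//		_struct bool `codec:",omitempty,toarray"` // omitEmpty=true, toArray=true, keytype defaults to string
//		Name    string
//		Age     uint
//	}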
  796. func (si *structFieldInfo) parseTag(stag string) {
  797. // if fname == "" {
  798. // panic(errNoFieldNameToStructFieldInfo)
  799. // }
  800. if stag == "" {
  801. return
  802. }
  803. for i, s := range strings.Split(stag, ",") {
  804. if i == 0 {
  805. if s != "" {
  806. si.encName = s
  807. }
  808. } else {
  809. switch s {
  810. case "omitempty":
  811. si.flagSet(structFieldInfoFlagOmitEmpty)
  812. // si.omitEmpty = true
  813. // case "toarray":
  814. // si.toArray = true
  815. }
  816. }
  817. }
  818. }
  819. type sfiSortedByEncName []*structFieldInfo
  820. func (p sfiSortedByEncName) Len() int {
  821. return len(p)
  822. }
  823. func (p sfiSortedByEncName) Less(i, j int) bool {
  824. return p[i].encName < p[j].encName
  825. }
  826. func (p sfiSortedByEncName) Swap(i, j int) {
  827. p[i], p[j] = p[j], p[i]
  828. }
  829. const structFieldNodeNumToCache = 4
  830. type structFieldNodeCache struct {
  831. rv [structFieldNodeNumToCache]reflect.Value
  832. idx [structFieldNodeNumToCache]uint32
  833. num uint8
  834. }
  835. func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) {
  836. for i, k := range &x.idx {
  837. if uint8(i) == x.num {
  838. return // break
  839. }
  840. if key == k {
  841. return x.rv[i], true
  842. }
  843. }
  844. return
  845. }
  846. func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) {
  847. if x.num < structFieldNodeNumToCache {
  848. x.rv[x.num] = fv
  849. x.idx[x.num] = key
  850. x.num++
  851. return
  852. }
  853. }
  854. type structFieldNode struct {
  855. v reflect.Value
  856. cache2 structFieldNodeCache
  857. cache3 structFieldNodeCache
  858. update bool
  859. }
  860. func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) {
  861. // return si.fieldval(x.v, x.update)
  862. // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding
  863. // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc.
  864. var valid bool
  865. switch si.nis {
  866. case 1:
  867. fv = x.v.Field(int(si.is[0]))
  868. case 2:
  869. if fv, valid = x.cache2.get(uint32(si.is[0])); valid {
  870. fv = fv.Field(int(si.is[1]))
  871. return
  872. }
  873. fv = x.v.Field(int(si.is[0]))
  874. if fv, valid = baseStructRv(fv, x.update); !valid {
  875. return
  876. }
  877. x.cache2.tryAdd(fv, uint32(si.is[0]))
  878. fv = fv.Field(int(si.is[1]))
  879. case 3:
  880. var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1])
  881. if fv, valid = x.cache3.get(key); valid {
  882. fv = fv.Field(int(si.is[2]))
  883. return
  884. }
  885. fv = x.v.Field(int(si.is[0]))
  886. if fv, valid = baseStructRv(fv, x.update); !valid {
  887. return
  888. }
  889. fv = fv.Field(int(si.is[1]))
  890. if fv, valid = baseStructRv(fv, x.update); !valid {
  891. return
  892. }
  893. x.cache3.tryAdd(fv, key)
  894. fv = fv.Field(int(si.is[2]))
  895. default:
  896. fv, _ = si.field(x.v, x.update)
  897. }
  898. return
  899. }
  900. func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) {
  901. for v.Kind() == reflect.Ptr {
  902. if v.IsNil() {
  903. if !update {
  904. return
  905. }
  906. v.Set(reflect.New(v.Type().Elem()))
  907. }
  908. v = v.Elem()
  909. }
  910. return v, true
  911. }
  912. type typeInfoFlag uint8
  913. const (
  914. typeInfoFlagComparable = 1 << iota
  915. typeInfoFlagIsZeroer
  916. typeInfoFlagIsZeroerPtr
  917. )
  918. // typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence.
  919. //
  920. // During an encode/decode sequence, we work as below:
  921. // - If base is a built in type, en/decode base value
  922. // - If base is registered as an extension, en/decode base value
  923. // - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
  924. // - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
  925. // - Else decode appropriately based on the reflect.Kind
  926. type typeInfo struct {
  927. rt reflect.Type
  928. elem reflect.Type
  929. pkgpath string
  930. rtid uintptr
  931. // rv0 reflect.Value // saved zero value, used if immutableKind
  932. numMeth uint16 // number of methods
  933. kind uint8
  934. chandir uint8
  935. anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty"
  936. toArray bool // whether this (struct) type should be encoded as an array
  937. keyType valueType // if struct, how is the field name stored in a stream? default is string
  938. mbs bool // base type (T or *T) is a MapBySlice
  939. // ---- cpu cache line boundary?
  940. sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map.
  941. sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array.
  942. key reflect.Type
  943. // ---- cpu cache line boundary?
  944. // sfis []structFieldInfo // all sfi, in src order, as created.
  945. sfiNamesSort []byte // all names, with indexes into the sfiSort
  946. // format of marshal type fields below: [btj][mu]p? OR csp?
  947. bm bool // T is a binaryMarshaler
  948. bmp bool // *T is a binaryMarshaler
  949. bu bool // T is a binaryUnmarshaler
  950. bup bool // *T is a binaryUnmarshaler
  951. tm bool // T is a textMarshaler
  952. tmp bool // *T is a textMarshaler
  953. tu bool // T is a textUnmarshaler
  954. tup bool // *T is a textUnmarshaler
  955. jm bool // T is a jsonMarshaler
  956. jmp bool // *T is a jsonMarshaler
  957. ju bool // T is a jsonUnmarshaler
  958. jup bool // *T is a jsonUnmarshaler
  959. cs bool // T is a Selfer
  960. csp bool // *T is a Selfer
  961. mf bool // T is a MissingFielder
  962. mfp bool // *T is a MissingFielder
  963. // other flags, with individual bits representing if set.
  964. flags typeInfoFlag
  965. infoFieldOmitempty bool
  966. _ [6]byte // padding
  967. _ [2]uint64 // padding
  968. }
  969. func (ti *typeInfo) isFlag(f typeInfoFlag) bool {
  970. return ti.flags&f != 0
  971. }
  972. func (ti *typeInfo) indexForEncName(name []byte) (index int16) {
  973. var sn []byte
  974. if len(name)+2 <= 32 {
  975. var buf [32]byte // should not escape
  976. sn = buf[:len(name)+2]
  977. } else {
  978. sn = make([]byte, len(name)+2)
  979. }
  980. copy(sn[1:], name)
  981. sn[0], sn[len(sn)-1] = tiSep2(name), 0xff
  982. j := bytes.Index(ti.sfiNamesSort, sn)
  983. if j < 0 {
  984. return -1
  985. }
  986. index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8)
  987. return
  988. }
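
// For illustration: each entry in sfiNamesSort (assembled via rgetResolveSFI below) is laid out as
//	tiSep(name) | name bytes | 0xff | index-hi-byte | index-lo-byte
// so the key built above (sep + name + 0xff) lands immediately before the 2-byte field index.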
  989. type rtid2ti struct {
  990. rtid uintptr
  991. ti *typeInfo
  992. }
  993. // TypeInfos caches typeInfo for each type on first inspection.
  994. //
  995. // It is configured with a set of tag keys, which are used to get
  996. // configuration for the type.
  997. type TypeInfos struct {
  998. // infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected
  999. infos atomicTypeInfoSlice
  1000. mu sync.Mutex
  1001. tags []string
  1002. _ [2]uint64 // padding
  1003. }
  1004. // NewTypeInfos creates a TypeInfos given a set of struct tags keys.
  1005. //
  1006. // This allows users to customize the struct tag keys which contain configuration
  1007. // of their types.
  1008. func NewTypeInfos(tags []string) *TypeInfos {
  1009. return &TypeInfos{tags: tags}
  1010. }
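
// For illustration, a hypothetical configuration that also honors a "db" tag key,
// falling back to the defaults (tag keys are checked in the order given):
//
//	var jh JsonHandle
//	jh.TypeInfos = NewTypeInfos([]string{"db", "codec", "json"})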
  1011. func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
  1012. // check for tags: codec, json, in that order.
  1013. // this allows seamless support for many configured structs.
  1014. for _, x := range x.tags {
  1015. s = t.Get(x)
  1016. if s != "" {
  1017. return s
  1018. }
  1019. }
  1020. return
  1021. }
  1022. func (x *TypeInfos) find(s []rtid2ti, rtid uintptr) (idx int, ti *typeInfo) {
  1023. // binary search. adapted from sort/search.go.
  1024. // if sp == nil {
  1025. // return -1, nil
  1026. // }
  1027. // s := *sp
  1028. h, i, j := 0, 0, len(s)
  1029. for i < j {
  1030. h = i + (j-i)/2
  1031. if s[h].rtid < rtid {
  1032. i = h + 1
  1033. } else {
  1034. j = h
  1035. }
  1036. }
  1037. if i < len(s) && s[i].rtid == rtid {
  1038. return i, s[i].ti
  1039. }
  1040. return i, nil
  1041. }
  1042. func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
  1043. sp := x.infos.load()
  1044. var idx int
  1045. if sp != nil {
  1046. idx, pti = x.find(sp, rtid)
  1047. if pti != nil {
  1048. return
  1049. }
  1050. }
  1051. rk := rt.Kind()
  1052. if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
  1053. panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
  1054. }
  1055. // do not hold lock while computing this.
  1056. // it may lead to duplication, but that's ok.
  1057. ti := typeInfo{
  1058. rt: rt,
  1059. rtid: rtid,
  1060. kind: uint8(rk),
  1061. pkgpath: rt.PkgPath(),
  1062. keyType: valueTypeString, // default it - so it's never 0
  1063. }
  1064. // ti.rv0 = reflect.Zero(rt)
  1065. // ti.comparable = rt.Comparable()
  1066. ti.numMeth = uint16(rt.NumMethod())
  1067. ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp)
  1068. ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp)
  1069. ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp)
  1070. ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp)
  1071. ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp)
  1072. ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp)
  1073. ti.cs, ti.csp = implIntf(rt, selferTyp)
  1074. ti.mf, ti.mfp = implIntf(rt, missingFielderTyp)
  1075. b1, b2 := implIntf(rt, iszeroTyp)
  1076. if b1 {
  1077. ti.flags |= typeInfoFlagIsZeroer
  1078. }
  1079. if b2 {
  1080. ti.flags |= typeInfoFlagIsZeroerPtr
  1081. }
  1082. if rt.Comparable() {
  1083. ti.flags |= typeInfoFlagComparable
  1084. }
  1085. switch rk {
  1086. case reflect.Struct:
  1087. var omitEmpty bool
  1088. if f, ok := rt.FieldByName(structInfoFieldName); ok {
  1089. ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
  1090. ti.infoFieldOmitempty = omitEmpty
  1091. } else {
  1092. ti.keyType = valueTypeString
  1093. }
  1094. pp, pi := pool.tiLoad()
  1095. pv := pi.(*typeInfoLoadArray)
  1096. pv.etypes[0] = ti.rtid
  1097. // vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
  1098. vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]}
  1099. x.rget(rt, rtid, omitEmpty, nil, &vv)
  1100. // ti.sfis = vv.sfis
  1101. ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv)
  1102. pp.Put(pi)
  1103. case reflect.Map:
  1104. ti.elem = rt.Elem()
  1105. ti.key = rt.Key()
  1106. case reflect.Slice:
  1107. ti.mbs, _ = implIntf(rt, mapBySliceTyp)
  1108. ti.elem = rt.Elem()
  1109. case reflect.Chan:
  1110. ti.elem = rt.Elem()
  1111. ti.chandir = uint8(rt.ChanDir())
  1112. case reflect.Array, reflect.Ptr:
  1113. ti.elem = rt.Elem()
  1114. }
  1115. // sfi = sfiSrc
  1116. x.mu.Lock()
  1117. sp = x.infos.load()
  1118. if sp == nil {
  1119. pti = &ti
  1120. vs := []rtid2ti{{rtid, pti}}
  1121. x.infos.store(vs)
  1122. } else {
  1123. idx, pti = x.find(sp, rtid)
  1124. if pti == nil {
  1125. pti = &ti
  1126. vs := make([]rtid2ti, len(sp)+1)
  1127. copy(vs, sp[:idx])
  1128. copy(vs[idx+1:], sp[idx:])
  1129. vs[idx] = rtid2ti{rtid, pti}
  1130. x.infos.store(vs)
  1131. }
  1132. }
  1133. x.mu.Unlock()
  1134. return
  1135. }
  1136. func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
  1137. indexstack []uint16, pv *typeInfoLoad) {
  1138. // Read up fields and store how to access the value.
  1139. //
  1140. // It uses Go's rules for field selectors,
  1141. // which say that the field with the shallowest depth is selected.
  1142. //
  1143. // Note: we consciously use slices, not a map, to simulate a set.
  1144. // Typically, types have < 16 fields,
  1145. // and iterating with equality checks is faster than map lookups at that size.
  1146. flen := rt.NumField()
  1147. if flen > (1<<maxLevelsEmbedding - 1) {
  1148. panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
  1149. (1<<maxLevelsEmbedding - 1), flen)
  1150. }
  1151. // pv.sfis = make([]structFieldInfo, flen)
  1152. LOOP:
  1153. for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
  1154. f := rt.Field(int(j))
  1155. fkind := f.Type.Kind()
  1156. // skip if a func type, or is unexported, or structTag value == "-"
  1157. switch fkind {
		case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
			continue LOOP
		}

		isUnexported := f.PkgPath != ""
		if isUnexported && !f.Anonymous {
			continue
		}
		stag := x.structTag(f.Tag)
		if stag == "-" {
			continue
		}
		var si structFieldInfo
		var parsed bool
		// if anonymous and no struct tag (or it's blank),
		// and a struct (or pointer to struct), inline it.
		if f.Anonymous && fkind != reflect.Interface {
			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
			ft := f.Type
			isPtr := ft.Kind() == reflect.Ptr
			for ft.Kind() == reflect.Ptr {
				ft = ft.Elem()
			}
			isStruct := ft.Kind() == reflect.Struct

			// Ignore embedded fields of unexported non-struct types.
			// Also, from go1.10, ignore pointers to unexported struct types
			// because unmarshal cannot assign a new struct to an unexported field.
			// See https://golang.org/issue/21357
			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
				continue
			}
			doInline := stag == ""
			if !doInline {
				si.parseTag(stag)
				parsed = true
				doInline = si.encName == ""
				// doInline = si.isZero()
			}
			if doInline && isStruct {
				// if etypes contains this, don't call rget again (as fields are already seen here)
				ftid := rt2id(ft)
				// We cannot recurse forever, but we need to track other field depths.
				// So - we break if we see a type twice (not the first time).
				// This should be sufficient to handle an embedded type that refers to its
				// owning type, which then refers to its embedded type.
				processIt := true
				numk := 0
				for _, k := range pv.etypes {
					if k == ftid {
						numk++
						if numk == rgetMaxRecursion {
							processIt = false
							break
						}
					}
				}
				if processIt {
					pv.etypes = append(pv.etypes, ftid)
					indexstack2 := make([]uint16, len(indexstack)+1)
					copy(indexstack2, indexstack)
					indexstack2[len(indexstack)] = j
					// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
					x.rget(ft, ftid, omitEmpty, indexstack2, pv)
				}
				continue
			}
		}

		// after the anonymous dance: if an unexported field, skip
		if isUnexported {
			continue
		}

		if f.Name == "" {
			panic(errNoFieldNameToStructFieldInfo)
		}

		// pv.fNames = append(pv.fNames, f.Name)
		// if si.encName == "" {

		if !parsed {
			si.encName = f.Name
			si.parseTag(stag)
			parsed = true
		} else if si.encName == "" {
			si.encName = f.Name
		}
		si.encNameAsciiAlphaNum = true
		for i := len(si.encName) - 1; i >= 0; i-- {
			b := si.encName[i]
			if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') {
				continue
			}
			si.encNameAsciiAlphaNum = false
			break
		}
		si.fieldName = f.Name
		si.flagSet(structFieldInfoFlagReady)

		// pv.encNames = append(pv.encNames, si.encName)

		// si.ikind = int(f.Type.Kind())
		if len(indexstack) > maxLevelsEmbedding-1 {
			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
				maxLevelsEmbedding-1, len(indexstack))
		}
		si.nis = uint8(len(indexstack)) + 1
		copy(si.is[:], indexstack)
		si.is[len(indexstack)] = j
		if omitEmpty {
			si.flagSet(structFieldInfoFlagOmitEmpty)
		}
		pv.sfis = append(pv.sfis, si)
	}
}
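// Illustrative note (assumption, not part of the original source): given
//	type Inner struct{ A int }
//	type Outer struct {
//		Inner
//		B string `codec:"b"`
//	}
// rget inlines the untagged embedded Inner (a struct kind), so A is recorded
// with index stack [0 0] and B with index stack [1]; si.is/si.nis later walk
// those indices to reach each field from an Outer value.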
func tiSep(name string) uint8 {
	// (xn[0]%64) // (between 192-255 - outside ascii BMP)
	// return 0xfe - (name[0] & 63)
	// return 0xfe - (name[0] & 63) - uint8(len(name))
	// return 0xfe - (name[0] & 63) - uint8(len(name)&63)
	// return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
}

func tiSep2(name []byte) uint8 {
	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
}
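// Illustrative note (not part of the original source): for the field name "Name",
// tiSep yields 0xfe - ('N'&63) - (4&63) = 0xfe - 14 - 4 = 0xec, a byte outside the
// ASCII range, so it is unlikely to collide with field-name bytes when used as the
// leading delimiter of the search keys built in rgetResolveSFI below.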
// rgetResolveSFI resolves the struct field info obtained from a call to rget.
// It returns the trimmed fields, both unsorted (y) and sorted by encName (z),
// along with the search index bytes (ss) and whether any field is tagged omitempty.
func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) (
	y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) {
	sa := pv.sfiidx[:0]
	sn := pv.b[:]
	n := len(x)

	var xn string
	var ui uint16
	var sep byte

	for i := range x {
		ui = uint16(i)
		xn = x[i].encName // fieldName or encName? use encName for now.
		if len(xn)+2 > cap(pv.b) {
			sn = make([]byte, len(xn)+2)
		} else {
			sn = sn[:len(xn)+2]
		}
		// use a custom sep, so that misses are less frequent,
		// since the sep (first char in search) is as unique as the first char in the field name.
		sep = tiSep(xn)
		sn[0], sn[len(sn)-1] = sep, 0xff
		copy(sn[1:], xn)
		j := bytes.Index(sa, sn)
		if j == -1 {
			sa = append(sa, sep)
			sa = append(sa, xn...)
			sa = append(sa, 0xff, byte(ui>>8), byte(ui))
		} else {
			index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8
			// one of them must be cleared (marked not-ready),
			// and the index updated appropriately to point at the other one
			if x[i].nis == x[index].nis {
			} else if x[i].nis < x[index].nis {
				sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui)
				if x[index].ready() {
					x[index].flagClr(structFieldInfoFlagReady)
					n--
				}
			} else {
				if x[i].ready() {
					x[i].flagClr(structFieldInfoFlagReady)
					n--
				}
			}
		}
	}
	var w []structFieldInfo
	sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray
	if sharingArray {
		w = make([]structFieldInfo, n)
	}

	// remove all the non-ready entries
	y = make([]*structFieldInfo, n)
	n = 0
	var sslen int
	for i := range x {
		if !x[i].ready() {
			continue
		}
		if !anyOmitEmpty && x[i].omitEmpty() {
			anyOmitEmpty = true
		}
		if sharingArray {
			w[n] = x[i]
			y[n] = &w[n]
		} else {
			y[n] = &x[i]
		}
		sslen = sslen + len(x[i].encName) + 4
		n++
	}
	if n != len(y) {
		panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d",
			rt, len(y), len(x), n)
	}

	z = make([]*structFieldInfo, len(y))
	copy(z, y)
	sort.Sort(sfiSortedByEncName(z))

	sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen
	if sharingArray {
		ss = make([]byte, 0, sslen)
	} else {
		ss = sa[:0] // reuse the newly made sa array if necessary
	}
	for i := range z {
		xn = z[i].encName
		sep = tiSep(xn)
		ui = uint16(i)
		ss = append(ss, sep)
		ss = append(ss, xn...)
		ss = append(ss, 0xff, byte(ui>>8), byte(ui))
	}
	return
}
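// Illustrative note (not part of the original source): each entry appended to
// sa/ss above has the layout
//	tiSep(encName) | encName bytes | 0xff | index-hi | index-lo
// so bytes.Index can locate a field by searching for sep+name+0xff, and the two
// trailing bytes recover the field's position as a big-endian uint16.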
func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
	return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
}
// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty:
//   - does it implement IsZero() bool
//   - is it comparable, so we can compare directly using ==
//   - if checkStruct, then walk through the encodable fields
//     and check if they are empty or not.
func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
	// v is a struct kind - no need to check again.
	// We only check isZero on a struct kind, to reduce the number of times
	// that we lookup the rtid and typeInfo for each type as we walk the tree.

	vt := v.Type()
	rtid := rt2id(vt)
	if tinfos == nil {
		tinfos = defTypeInfos
	}
	ti := tinfos.get(rtid, vt)
	if ti.rtid == timeTypId {
		return rv2i(v).(time.Time).IsZero()
	}
	if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() {
		return rv2i(v.Addr()).(isZeroer).IsZero()
	}
	if ti.isFlag(typeInfoFlagIsZeroer) {
		return rv2i(v).(isZeroer).IsZero()
	}
	if ti.isFlag(typeInfoFlagComparable) {
		return rv2i(v) == rv2i(reflect.Zero(vt))
	}
	if !checkStruct {
		return false
	}
	// We only care about what we can encode/decode,
	// so that is what we use to check omitEmpty.
	for _, si := range ti.sfiSrc {
		sfv, valid := si.field(v, false)
		if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) {
			return false
		}
	}
	return true
}
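// Illustrative sketch (assumption, not part of the original source): a type that
// implements isZeroer short-circuits the field walk above, e.g.
//	type Money struct{ Cents int64 }
//	func (m Money) IsZero() bool { return m.Cents == 0 }
// With such a field under omitempty, isEmptyStruct returns via the
// typeInfoFlagIsZeroer branch instead of iterating ti.sfiSrc.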
// func roundFloat(x float64) float64 {
// 	t := math.Trunc(x)
// 	if math.Abs(x-t) >= 0.5 {
// 		return t + math.Copysign(1, x)
// 	}
// 	return t
// }
func panicToErr(h errDecorator, err *error) {
	// Note: This method MUST be the deferred call itself, i.e. defer panicToErr(...),
	// else the recover is not fully handled.
	if recoverPanicToErr {
		if x := recover(); x != nil {
			// fmt.Printf("panic'ing with: %v\n", x)
			// debug.PrintStack()
			panicValToErr(h, x, err)
		}
	}
}
func panicValToErr(h errDecorator, v interface{}, err *error) {
	switch xerr := v.(type) {
	case nil:
	case error:
		switch xerr {
		case nil:
		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
			// treat as special (bubble up)
			*err = xerr
		default:
			h.wrapErr(xerr, err)
		}
	case string:
		if xerr != "" {
			h.wrapErr(xerr, err)
		}
	case fmt.Stringer:
		if xerr != nil {
			h.wrapErr(xerr, err)
		}
	default:
		h.wrapErr(v, err)
	}
}
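// Illustrative usage sketch (hypothetical, not part of the original source):
//	func doSomething(h errDecorator) (err error) {
//		defer panicToErr(h, &err) // must be the deferred call itself, per the note above
//		panicv.errorstr("boom")   // any panic below this point is converted into err
//		return
//	}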
func isImmutableKind(k reflect.Kind) (v bool) {
	return immutableKindsSet[k]
}
// ----

type codecFnInfo struct {
	ti    *typeInfo
	xfFn  Ext
	xfTag uint64
	seq   seqType
	addrD bool
	addrF bool // if addrD, this says whether the decode function can take a value or a ptr
	addrE bool
	ready bool // ready to use
}

// codecFn encapsulates the captured variables and the encode function.
// This way, we only do some calculations once, and pass control to the
// code block that should be called (encapsulated in a function)
// instead of executing the checks every time.
type codecFn struct {
	i  codecFnInfo
	fe func(*Encoder, *codecFnInfo, reflect.Value)
	fd func(*Decoder, *codecFnInfo, reflect.Value)
	_  [1]uint64 // padding
}

type codecRtidFn struct {
	rtid uintptr
	fn   *codecFn
}

type codecFner struct {
	// hh Handle
	h  *BasicHandle
	s  []codecRtidFn
	be bool
	js bool
	_  [6]byte   // padding
	_  [3]uint64 // padding
}
func (c *codecFner) reset(hh Handle) {
	bh := hh.getBasicHandle()
	// only reset if extensions changed or *TypeInfos changed
	var hhSame = true &&
		c.h == bh && c.h.TypeInfos == bh.TypeInfos &&
		len(c.h.extHandle) == len(bh.extHandle) &&
		(len(c.h.extHandle) == 0 || &c.h.extHandle[0] == &bh.extHandle[0])
	if !hhSame {
		// c.hh = hh
		c.h, bh = bh, c.h // swap both
		_, c.js = hh.(*JsonHandle)
		c.be = hh.isBinary()
		if len(c.s) > 0 {
			c.s = c.s[:0]
		}
		// for i := range c.s {
		// 	c.s[i].fn.i.ready = false
		// }
	}
}
func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) {
	rtid := rt2id(rt)

	for _, x := range c.s {
		if x.rtid == rtid {
			// if rtid exists, then there's a *codecFn attached (non-nil)
			fn = x.fn
			if fn.i.ready {
				return
			}
			break
		}
	}
	var ti *typeInfo
	if fn == nil {
		fn = new(codecFn)
		if c.s == nil {
			c.s = make([]codecRtidFn, 0, 8)
		}
		c.s = append(c.s, codecRtidFn{rtid, fn})
	} else {
		ti = fn.i.ti
		*fn = codecFn{}
		fn.i.ti = ti
		// fn.fe, fn.fd = nil, nil
	}
	fi := &(fn.i)
	fi.ready = true

	if ti == nil {
		ti = c.h.getTypeInfo(rtid, rt)
		fi.ti = ti
	}

	rk := reflect.Kind(ti.kind)

	if checkCodecSelfer && (ti.cs || ti.csp) {
		fn.fe = (*Encoder).selferMarshal
		fn.fd = (*Decoder).selferUnmarshal
		fi.addrF = true
		fi.addrD = ti.csp
		fi.addrE = ti.csp
	} else if rtid == timeTypId {
		fn.fe = (*Encoder).kTime
		fn.fd = (*Decoder).kTime
	} else if rtid == rawTypId {
		fn.fe = (*Encoder).raw
		fn.fd = (*Decoder).raw
	} else if rtid == rawExtTypId {
		fn.fe = (*Encoder).rawExt
		fn.fd = (*Decoder).rawExt
		fi.addrF = true
		fi.addrD = true
		fi.addrE = true
	} else if xfFn := c.h.getExt(rtid); xfFn != nil {
		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
		fn.fe = (*Encoder).ext
		fn.fd = (*Decoder).ext
		fi.addrF = true
		fi.addrD = true
		if rk == reflect.Struct || rk == reflect.Array {
			fi.addrE = true
		}
	} else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) {
		fn.fe = (*Encoder).binaryMarshal
		fn.fd = (*Decoder).binaryUnmarshal
		fi.addrF = true
		fi.addrD = ti.bup
		fi.addrE = ti.bmp
	} else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) {
		// If JSON, we should check JSONMarshal before textMarshal
		fn.fe = (*Encoder).jsonMarshal
		fn.fd = (*Decoder).jsonUnmarshal
		fi.addrF = true
		fi.addrD = ti.jup
		fi.addrE = ti.jmp
	} else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) {
		fn.fe = (*Encoder).textMarshal
		fn.fd = (*Decoder).textUnmarshal
		fi.addrF = true
		fi.addrD = ti.tup
		fi.addrE = ti.tmp
	} else {
		if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
			if ti.pkgpath == "" { // un-named slice or map
				if idx := fastpathAV.index(rtid); idx != -1 {
					fn.fe = fastpathAV[idx].encfn
					fn.fd = fastpathAV[idx].decfn
					fi.addrD = true
					fi.addrF = false
				}
			} else {
				// use mapping for underlying type if there
				var rtu reflect.Type
				if rk == reflect.Map {
					rtu = reflect.MapOf(ti.key, ti.elem)
				} else {
					rtu = reflect.SliceOf(ti.elem)
				}
				rtuid := rt2id(rtu)
				if idx := fastpathAV.index(rtuid); idx != -1 {
					xfnf := fastpathAV[idx].encfn
					xrt := fastpathAV[idx].rt
					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
						xfnf(e, xf, xrv.Convert(xrt))
					}
					fi.addrD = true
					fi.addrF = false // meaning it can be an address (ptr) or a value
					xfnf2 := fastpathAV[idx].decfn
					fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
						if xrv.Kind() == reflect.Ptr {
							xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt)))
						} else {
							xfnf2(d, xf, xrv.Convert(xrt))
						}
					}
				}
			}
		}
		if fn.fe == nil && fn.fd == nil {
			switch rk {
			case reflect.Bool:
				fn.fe = (*Encoder).kBool
				fn.fd = (*Decoder).kBool
			case reflect.String:
				fn.fe = (*Encoder).kString
				fn.fd = (*Decoder).kString
			case reflect.Int:
				fn.fd = (*Decoder).kInt
				fn.fe = (*Encoder).kInt
			case reflect.Int8:
				fn.fe = (*Encoder).kInt8
				fn.fd = (*Decoder).kInt8
			case reflect.Int16:
				fn.fe = (*Encoder).kInt16
				fn.fd = (*Decoder).kInt16
			case reflect.Int32:
				fn.fe = (*Encoder).kInt32
				fn.fd = (*Decoder).kInt32
			case reflect.Int64:
				fn.fe = (*Encoder).kInt64
				fn.fd = (*Decoder).kInt64
			case reflect.Uint:
				fn.fd = (*Decoder).kUint
				fn.fe = (*Encoder).kUint
			case reflect.Uint8:
				fn.fe = (*Encoder).kUint8
				fn.fd = (*Decoder).kUint8
			case reflect.Uint16:
				fn.fe = (*Encoder).kUint16
				fn.fd = (*Decoder).kUint16
			case reflect.Uint32:
				fn.fe = (*Encoder).kUint32
				fn.fd = (*Decoder).kUint32
			case reflect.Uint64:
				fn.fe = (*Encoder).kUint64
				fn.fd = (*Decoder).kUint64
			case reflect.Uintptr:
				fn.fe = (*Encoder).kUintptr
				fn.fd = (*Decoder).kUintptr
			case reflect.Float32:
				fn.fe = (*Encoder).kFloat32
				fn.fd = (*Decoder).kFloat32
			case reflect.Float64:
				fn.fe = (*Encoder).kFloat64
				fn.fd = (*Decoder).kFloat64
			case reflect.Invalid:
				fn.fe = (*Encoder).kInvalid
				fn.fd = (*Decoder).kErr
			case reflect.Chan:
				fi.seq = seqTypeChan
				fn.fe = (*Encoder).kSlice
				fn.fd = (*Decoder).kSlice
			case reflect.Slice:
				fi.seq = seqTypeSlice
				fn.fe = (*Encoder).kSlice
				fn.fd = (*Decoder).kSlice
			case reflect.Array:
				fi.seq = seqTypeArray
				fn.fe = (*Encoder).kSlice
				fi.addrF = false
				fi.addrD = false
				rt2 := reflect.SliceOf(ti.elem)
				fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
					d.cfer().get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len()))
				}
				// fn.fd = (*Decoder).kArray
			case reflect.Struct:
				if ti.anyOmitEmpty || ti.mf || ti.mfp {
					fn.fe = (*Encoder).kStruct
				} else {
					fn.fe = (*Encoder).kStructNoOmitempty
				}
				fn.fd = (*Decoder).kStruct
			case reflect.Map:
				fn.fe = (*Encoder).kMap
				fn.fd = (*Decoder).kMap
			case reflect.Interface:
				// encode: reflect.Interface is handled already by preEncodeValue
				fn.fd = (*Decoder).kInterface
				fn.fe = (*Encoder).kErr
			default:
				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
				fn.fe = (*Encoder).kErr
				fn.fd = (*Decoder).kErr
			}
		}
	}
	return
}
type codecFnPooler struct {
	cf  *codecFner
	cfp *sync.Pool
	hh  Handle
}

func (d *codecFnPooler) cfer() *codecFner {
	if d.cf == nil {
		var v interface{}
		d.cfp, v = pool.codecFner()
		d.cf = v.(*codecFner)
		d.cf.reset(d.hh)
	}
	return d.cf
}

func (d *codecFnPooler) alwaysAtEnd() {
	if d.cf != nil {
		d.cfp.Put(d.cf)
		d.cf, d.cfp = nil, nil
	}
}
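// Illustrative usage sketch (assumption, not part of the original source):
//	var p codecFnPooler
//	p.hh = someHandle                  // hypothetical Handle value
//	fn := p.cfer().get(rt, true, true) // lazily borrows a codecFner from the pool
//	_ = fn
//	p.alwaysAtEnd()                    // returns the borrowed codecFner to the pool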
// ----

// These "checkOverflow" functions must be inlinable, and must not call any other functions.
// Overflow means that the value cannot be represented without wrapping/overflow.
// Overflow=false does not mean that the value can be represented without losing precision
// (especially for floating point).

type checkOverflow struct{}

// func (checkOverflow) Float16(f float64) (overflow bool) {
// 	panicv.errorf("unimplemented")
// 	if f < 0 {
// 		f = -f
// 	}
// 	return math.MaxFloat32 < f && f <= math.MaxFloat64
// }
func (checkOverflow) Float32(v float64) (overflow bool) {
	if v < 0 {
		v = -v
	}
	return math.MaxFloat32 < v && v <= math.MaxFloat64
}

func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
	if bitsize == 0 || bitsize >= 64 || v == 0 {
		return
	}
	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
		overflow = true
	}
	return
}

func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
	if bitsize == 0 || bitsize >= 64 || v == 0 {
		return
	}
	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
		overflow = true
	}
	return
}
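// Illustrative note (not part of the original source): the shift pair keeps only
// the low `bitsize` bits. For Uint(300, 8): (300<<56)>>56 == 44 != 300, so 300
// overflows 8 bits; for Uint(200, 8) the round trip returns 200 and there is
// no overflow.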
func (checkOverflow) SignedInt(v uint64) (overflow bool) {
	// e.g. the int8 range is -128 to 127, so the negative side extends one further
	pos := (v >> 63) == 0
	ui2 := v & 0x7fffffffffffffff
	if pos {
		if ui2 > math.MaxInt64 {
			overflow = true
		}
	} else {
		if ui2 > math.MaxInt64-1 {
			overflow = true
		}
	}
	return
}
func (x checkOverflow) Float32V(v float64) float64 {
	if x.Float32(v) {
		panicv.errorf("float32 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
	if x.Uint(v, bitsize) {
		panicv.errorf("uint64 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
	if x.Int(v, bitsize) {
		panicv.errorf("int64 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) SignedIntV(v uint64) int64 {
	if x.SignedInt(v) {
		panicv.errorf("uint64 to int64 overflow: %v", v)
	}
	return int64(v)
}
// ------------------ SORT -----------------

func isNaN(f float64) bool { return f != f }

// -----------------------

type ioFlusher interface {
	Flush() error
}

type ioPeeker interface {
	Peek(int) ([]byte, error)
}

type ioBuffered interface {
	Buffered() int
}
// -----------------------

type intSlice []int64
type uintSlice []uint64

// type uintptrSlice []uintptr
type floatSlice []float64
type boolSlice []bool
type stringSlice []string

// type bytesSlice [][]byte

func (p intSlice) Len() int           { return len(p) }
func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p intSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p uintSlice) Len() int           { return len(p) }
func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p uintSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// func (p uintptrSlice) Len() int           { return len(p) }
// func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] }
// func (p uintptrSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p floatSlice) Len() int { return len(p) }
func (p floatSlice) Less(i, j int) bool {
	return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
}
func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

func (p stringSlice) Len() int           { return len(p) }
func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p stringSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// func (p bytesSlice) Len() int           { return len(p) }
// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
// func (p bytesSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p boolSlice) Len() int           { return len(p) }
func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
func (p boolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
// ---------------------

type sfiRv struct {
	v *structFieldInfo
	r reflect.Value
}

type intRv struct {
	v int64
	r reflect.Value
}
type intRvSlice []intRv

type uintRv struct {
	v uint64
	r reflect.Value
}
type uintRvSlice []uintRv

type floatRv struct {
	v float64
	r reflect.Value
}
type floatRvSlice []floatRv

type boolRv struct {
	v bool
	r reflect.Value
}
type boolRvSlice []boolRv

type stringRv struct {
	v string
	r reflect.Value
}
type stringRvSlice []stringRv

type bytesRv struct {
	v []byte
	r reflect.Value
}
type bytesRvSlice []bytesRv

type timeRv struct {
	v time.Time
	r reflect.Value
}
type timeRvSlice []timeRv

func (p intRvSlice) Len() int           { return len(p) }
func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
func (p intRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p uintRvSlice) Len() int           { return len(p) }
func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
func (p uintRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p floatRvSlice) Len() int { return len(p) }
func (p floatRvSlice) Less(i, j int) bool {
	return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
}
func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

func (p stringRvSlice) Len() int           { return len(p) }
func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
func (p stringRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p bytesRvSlice) Len() int           { return len(p) }
func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
func (p bytesRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p boolRvSlice) Len() int           { return len(p) }
func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
func (p boolRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p timeRvSlice) Len() int           { return len(p) }
func (p timeRvSlice) Less(i, j int) bool { return p[i].v.Before(p[j].v) }
func (p timeRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
// -----------------

type bytesI struct {
	v []byte
	i interface{}
}

type bytesISlice []bytesI

func (p bytesISlice) Len() int           { return len(p) }
func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
func (p bytesISlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
// -----------------

type set []uintptr

func (s *set) add(v uintptr) (exists bool) {
	// e.ci is always nil, or len >= 1
	x := *s
	if x == nil {
		x = make([]uintptr, 1, 8)
		x[0] = v
		*s = x
		return
	}
	// typically, length will be 1. make this perform.
	if len(x) == 1 {
		if j := x[0]; j == 0 {
			x[0] = v
		} else if j == v {
			exists = true
		} else {
			x = append(x, v)
			*s = x
		}
		return
	}
	// check if it exists
	for _, j := range x {
		if j == v {
			exists = true
			return
		}
	}
	// try to replace a "deleted" slot
	for i, j := range x {
		if j == 0 {
			x[i] = v
			return
		}
	}
	// if unable to replace a deleted slot, just append it.
	x = append(x, v)
	*s = x
	return
}

func (s *set) remove(v uintptr) (exists bool) {
	x := *s
	if len(x) == 0 {
		return
	}
	if len(x) == 1 {
		if x[0] == v {
			x[0] = 0
		}
		return
	}
	for i, j := range x {
		if j == v {
			exists = true
			x[i] = 0 // set it to 0, as a way to delete it.
			// copy(x[i:], x[i+1:])
			// x = x[:len(x)-1]
			return
		}
	}
	return
}
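// Illustrative usage sketch (assumption, not part of the original source):
//	var seen set
//	id := uintptr(0x1234)  // hypothetical id, e.g. a rt2id value or pointer address
//	if seen.add(id) {      // true means id was already present
//		// repeat or cycle detected
//	}
//	seen.remove(id)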
// ------

// bitset types are better than [256]bool, because they permit the whole
// bitset array to fit on a single cache line, and use less memory.
//
// Given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
// Consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 === pos&7.

type bitset256 [32]byte

func (x *bitset256) isset(pos byte) bool {
	return x[pos>>3]&(1<<(pos&7)) != 0
}
func (x *bitset256) issetv(pos byte) byte {
	return x[pos>>3] & (1 << (pos & 7))
}
func (x *bitset256) set(pos byte) {
	x[pos>>3] |= (1 << (pos & 7))
}

// func (x *bitset256) unset(pos byte) {
// 	x[pos>>3] &^= (1 << (pos & 7))
// }
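// Illustrative note (not part of the original source): for pos = 42,
// pos>>3 = 5 and pos&7 = 2, so isset(42) tests bit 2 of x[5], i.e.
// x[5] & 0b00000100 - the same bit that set(42) turns on.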
type bitset128 [16]byte

func (x *bitset128) isset(pos byte) bool {
	return x[pos>>3]&(1<<(pos&7)) != 0
}
func (x *bitset128) set(pos byte) {
	x[pos>>3] |= (1 << (pos & 7))
}

// func (x *bitset128) unset(pos byte) {
// 	x[pos>>3] &^= (1 << (pos & 7))
// }

type bitset32 [4]byte

func (x *bitset32) isset(pos byte) bool {
	return x[pos>>3]&(1<<(pos&7)) != 0
}
func (x *bitset32) set(pos byte) {
	x[pos>>3] |= (1 << (pos & 7))
}

// func (x *bitset32) unset(pos byte) {
// 	x[pos>>3] &^= (1 << (pos & 7))
// }
// type bit2set256 [64]byte
//
// func (x *bit2set256) set(pos byte, v1, v2 bool) {
// 	var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
// 	if v1 {
// 		x[pos>>2] |= 1 << (pos2 + 1)
// 	}
// 	if v2 {
// 		x[pos>>2] |= 1 << pos2
// 	}
// }
// func (x *bit2set256) get(pos byte) uint8 {
// 	var pos2 uint8 = (pos & 3) << 1     // returning 0, 2, 4 or 6
// 	return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011
// }
// ------------

type pooler struct {
	dn     sync.Pool // for decNaked
	cfn    sync.Pool // for codecFner
	tiload sync.Pool // for typeInfoLoadArray

	strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool // for [N]sfiRv arrays
}

func (p *pooler) init() {
	p.strRv8.New = func() interface{} { return new([8]sfiRv) }
	p.strRv16.New = func() interface{} { return new([16]sfiRv) }
	p.strRv32.New = func() interface{} { return new([32]sfiRv) }
	p.strRv64.New = func() interface{} { return new([64]sfiRv) }
	p.strRv128.New = func() interface{} { return new([128]sfiRv) }
	p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x }
	p.tiload.New = func() interface{} { return new(typeInfoLoadArray) }
	p.cfn.New = func() interface{} { return new(codecFner) }
}
func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) {
	return &p.strRv8, p.strRv8.Get()
}
func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) {
	return &p.strRv16, p.strRv16.Get()
}
func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) {
	return &p.strRv32, p.strRv32.Get()
}
func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) {
	return &p.strRv64, p.strRv64.Get()
}
func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) {
	return &p.strRv128, p.strRv128.Get()
}

func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) {
	return &p.dn, p.dn.Get()
}
func (p *pooler) codecFner() (sp *sync.Pool, v interface{}) {
	return &p.cfn, p.cfn.Get()
}
func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
	return &p.tiload, p.tiload.Get()
}
// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked)) {
// 	sp := &(p.dn)
// 	vv := sp.Get()
// 	return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) }
// }
// func (p *pooler) decNakedGet() (v interface{}) {
// 	return p.dn.Get()
// }
// func (p *pooler) codecFnerGet() (v interface{}) {
// 	return p.cfn.Get()
// }
// func (p *pooler) tiLoadGet() (v interface{}) {
// 	return p.tiload.Get()
// }
// func (p *pooler) decNakedPut(v interface{}) {
// 	p.dn.Put(v)
// }
// func (p *pooler) codecFnerPut(v interface{}) {
// 	p.cfn.Put(v)
// }
// func (p *pooler) tiLoadPut(v interface{}) {
// 	p.tiload.Put(v)
// }
type panicHdl struct{}

func (panicHdl) errorv(err error) {
	if err != nil {
		panic(err)
	}
}

func (panicHdl) errorstr(message string) {
	if message != "" {
		panic(message)
	}
}

func (panicHdl) errorf(format string, params ...interface{}) {
	if format != "" {
		if len(params) == 0 {
			panic(format)
		} else {
			panic(fmt.Sprintf(format, params...))
		}
	}
}
type errDecorator interface {
	wrapErr(in interface{}, out *error)
}

type errDecoratorDef struct{}

func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) }
type must struct{}

func (must) String(s string, err error) string {
	if err != nil {
		panicv.errorv(err)
	}
	return s
}
func (must) Int(s int64, err error) int64 {
	if err != nil {
		panicv.errorv(err)
	}
	return s
}
func (must) Uint(s uint64, err error) uint64 {
	if err != nil {
		panicv.errorv(err)
	}
	return s
}
func (must) Float(s float64, err error) float64 {
	if err != nil {
		panicv.errorv(err)
	}
	return s
}
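// Illustrative usage sketch (hypothetical, not part of the original source):
//	var mustv must
//	n := mustv.Int(strconv.ParseInt("42", 10, 64)) // panics via panicv.errorv if parsing fails
//	_ = n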
// xdebugf prints the message in red on the terminal.
// Use it in place of fmt.Printf (which it calls internally).
func xdebugf(pattern string, args ...interface{}) {
	var delim string
	if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' {
		delim = "\n"
	}
	fmt.Printf("\033[1;31m"+pattern+delim+"\033[0m", args...)
}
// func isImmutableKind(k reflect.Kind) (v bool) {
// 	return false ||
// 		k == reflect.Int ||
// 		k == reflect.Int8 ||
// 		k == reflect.Int16 ||
// 		k == reflect.Int32 ||
// 		k == reflect.Int64 ||
// 		k == reflect.Uint ||
// 		k == reflect.Uint8 ||
// 		k == reflect.Uint16 ||
// 		k == reflect.Uint32 ||
// 		k == reflect.Uint64 ||
// 		k == reflect.Uintptr ||
// 		k == reflect.Float32 ||
// 		k == reflect.Float64 ||
// 		k == reflect.Bool ||
// 		k == reflect.String
// }
// func timeLocUTCName(tzint int16) string {
// 	if tzint == 0 {
// 		return "UTC"
// 	}
// 	var tzname = []byte("UTC+00:00")
// 	// tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) // perf issue using Sprintf; inlined below.
// 	// tzhr, tzmin := tz/60, tz%60 // faster if you convert to int first
// 	var tzhr, tzmin int16
// 	if tzint < 0 {
// 		tzname[3] = '-' // (TODO: verify. this works here)
// 		tzhr, tzmin = -tzint/60, (-tzint)%60
// 	} else {
// 		tzhr, tzmin = tzint/60, tzint%60
// 	}
// 	tzname[4] = timeDigits[tzhr/10]
// 	tzname[5] = timeDigits[tzhr%10]
// 	tzname[7] = timeDigits[tzmin/10]
// 	tzname[8] = timeDigits[tzmin%10]
// 	return string(tzname)
// 	// return time.FixedZone(string(tzname), int(tzint)*60)
// }