blob: d0aaee03d59d07b4bd39ef861f1ea62ccc5fae30 [file] [log] [blame]
Dan Willemsencc753b72021-08-31 13:25:42 -07001// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package walk
6
7import (
8 "fmt"
9 "go/constant"
10 "go/token"
11 "strings"
12
13 "cmd/compile/internal/base"
14 "cmd/compile/internal/escape"
15 "cmd/compile/internal/ir"
16 "cmd/compile/internal/reflectdata"
17 "cmd/compile/internal/typecheck"
18 "cmd/compile/internal/types"
19)
20
// walkAppend rewrites append(src, x, y, z) so that any side effects in
// x, y, z (including runtime panics) are evaluated in
// initialization statements before the append.
// For normal code generation, stop there and leave the
// rest to cgen_append.
//
// For race detector, expand append(src, a [, b]* ) to
//
// init {
//   s := src
//   const argc = len(args) - 1
//   if cap(s) - len(s) < argc {
//     s = growslice(s, len(s)+argc)
//   }
//   n := len(s)
//   s = s[:n+argc]
//   s[n] = a
//   s[n+1] = b
//   ...
// }
// s
func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
	// If the destination is not the same expression as the source slice,
	// the source must be made safe to evaluate twice.
	if !ir.SameSafeExpr(dst, n.Args[0]) {
		n.Args[0] = safeExpr(n.Args[0], init)
		n.Args[0] = walkExpr(n.Args[0], init)
	}
	walkExprListSafe(n.Args[1:], init)

	nsrc := n.Args[0]

	// walkExprListSafe will leave OINDEX (s[n]) alone if both s
	// and n are name or literal, but those may index the slice we're
	// modifying here. Fix explicitly.
	// Using cheapExpr also makes sure that the evaluation
	// of all arguments (and especially any panics) happen
	// before we begin to modify the slice in a visible way.
	ls := n.Args[1:]
	for i, n := range ls {
		n = cheapExpr(n, init)
		// Convert each appended value to the slice's element type.
		if !types.Identical(n.Type(), nsrc.Type().Elem()) {
			n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
			n = walkExpr(n, init)
		}
		ls[i] = n
	}

	// append(src) with no extra arguments is just src.
	argc := len(n.Args) - 1
	if argc < 1 {
		return nsrc
	}

	// General case, with no function calls left as arguments.
	// Leave for gen, except that instrumentation requires old form.
	if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
		return n
	}

	var l []ir.Node

	ns := typecheck.Temp(nsrc.Type())
	l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src

	na := ir.NewInt(int64(argc))                 // const argc
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)

	fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
	fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())

	// s = growslice(T, s, len(s)+argc)
	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
		ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}

	l = append(l, nif)

	nn := typecheck.Temp(types.Types[types.TINT])
	l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)

	slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns, nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil) // ...s[:n+argc]
	slice.SetBounded(true)
	l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]

	// Store each argument into its slot: s[n] = a; n = n + 1; s[n] = b; ...
	ls = n.Args[1:]
	for i, n := range ls {
		ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
		ix.SetBounded(true)
		l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg
		if i+1 < len(ls) {
			l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1
		}
	}

	typecheck.Stmts(l)
	walkStmtList(l)
	init.Append(l...)
	return ns
}
117
118// walkClose walks an OCLOSE node.
119func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
120 // cannot use chanfn - closechan takes any, not chan any
121 fn := typecheck.LookupRuntime("closechan")
122 fn = typecheck.SubstArgTypes(fn, n.X.Type())
123 return mkcall1(fn, nil, init, n.X)
124}
125
// walkCopy lowers copy(a, b) to a memmove call or a runtime call.
//
// init {
//   n := len(a)
//   if n > len(b) { n = len(b) }
//   if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
// }
// n;
//
// Also works if b is a string.
//
func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
	// Elements containing pointers need write barriers: call
	// typedslicecopy instead of doing a raw memmove.
	if n.X.Type().Elem().HasPointers() {
		ir.CurFunc.SetWBPos(n.Pos())
		fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
		n.X = cheapExpr(n.X, init)
		ptrL, lenL := backingArrayPtrLen(n.X)
		n.Y = cheapExpr(n.Y, init)
		ptrR, lenR := backingArrayPtrLen(n.Y)
		return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
	}

	if runtimecall {
		// rely on runtime to instrument:
		//  copy(n.Left, n.Right)
		// n.Right can be a slice or string.

		n.X = cheapExpr(n.X, init)
		ptrL, lenL := backingArrayPtrLen(n.X)
		n.Y = cheapExpr(n.Y, init)
		ptrR, lenR := backingArrayPtrLen(n.Y)

		fn := typecheck.LookupRuntime("slicecopy")
		fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())

		return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Size()))
	}

	// Inline expansion: build the statement sequence shown in the
	// function comment above.
	n.X = walkExpr(n.X, init)
	n.Y = walkExpr(n.Y, init)
	nl := typecheck.Temp(n.X.Type())
	nr := typecheck.Temp(n.Y.Type())
	var l []ir.Node
	l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
	l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))

	nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
	nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)

	nlen := typecheck.Temp(types.Types[types.TINT])

	// n = len(to)
	l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))

	// if n > len(frm) { n = len(frm) }
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)

	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
	nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
	l = append(l, nif)

	// if to.ptr != frm.ptr { memmove( ... ) }
	ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
	ne.Likely = true
	l = append(l, ne)

	fn := typecheck.LookupRuntime("memmove")
	fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
	nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
	setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
	ne.Body.Append(setwid)
	// Byte count: n * sizeof(elem).
	nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Size()))
	call := mkcall1(fn, nil, init, nto, nfrm, nwid)
	ne.Body.Append(call)

	typecheck.Stmts(l)
	walkStmtList(l)
	init.Append(l...)
	return nlen
}
206
207// walkDelete walks an ODELETE node.
208func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
209 init.Append(ir.TakeInit(n)...)
210 map_ := n.Args[0]
211 key := n.Args[1]
212 map_ = walkExpr(map_, init)
213 key = walkExpr(key, init)
214
215 t := map_.Type()
216 fast := mapfast(t)
217 key = mapKeyArg(fast, n, key)
218 return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
219}
220
// walkLenCap walks an OLEN or OCAP node.
func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
	if isRuneCount(n) {
		// Replace len([]rune(string)) with runtime.countrunes(string).
		return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
	}

	n.X = walkExpr(n.X, init)

	// replace len(*[10]int) with 10.
	// delayed until now to preserve side effects.
	t := n.X.Type()

	if t.IsPtr() {
		t = t.Elem()
	}
	if t.IsArray() {
		// Called for its side effect only: safeExpr appends to init
		// any statements needed to evaluate n.X; the result is unused
		// because the length is a compile-time constant.
		safeExpr(n.X, init)
		con := typecheck.OrigInt(n, t.NumElem())
		con.SetTypecheck(1)
		return con
	}
	return n
}
245
246// walkMakeChan walks an OMAKECHAN node.
247func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
248 // When size fits into int, use makechan instead of
249 // makechan64, which is faster and shorter on 32 bit platforms.
250 size := n.Len
251 fnname := "makechan64"
252 argtype := types.Types[types.TINT64]
253
254 // Type checking guarantees that TIDEAL size is positive and fits in an int.
255 // The case of size overflow when converting TUINT or TUINTPTR to TINT
256 // will be handled by the negative range checks in makechan during runtime.
257 if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
258 fnname = "makechan"
259 argtype = types.Types[types.TINT]
260 }
261
262 return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
263}
264
// walkMakeMap walks an OMAKEMAP node. Depending on the hint and on
// whether the map escapes, it allocates the hmap (and possibly one
// bucket) on the stack, or emits a call to runtime.makemap_small,
// runtime.makemap, or runtime.makemap64.
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	t := n.Type()
	hmapType := reflectdata.MapType(t)
	hint := n.Len

	// var h *hmap
	var h ir.Node
	if n.Esc() == ir.EscNone {
		// Allocate hmap on stack.

		// var hv hmap
		// h = &hv
		h = stackTempAddr(init, hmapType)

		// Allocate one bucket pointed to by hmap.buckets on stack if hint
		// is not larger than BUCKETSIZE. In case hint is larger than
		// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
		// Maximum key and elem size is 128 bytes, larger objects
		// are stored with an indirection. So max bucket size is 2048+eps.
		if !ir.IsConst(hint, constant.Int) ||
			constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {

			// In case hint is larger than BUCKETSIZE runtime.makemap
			// will allocate the buckets on the heap, see #20184
			//
			// if hint <= BUCKETSIZE {
			//     var bv bmap
			//     b = &bv
			//     h.buckets = b
			// }

			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
			nif.Likely = true

			// var bv bmap
			// b = &bv
			b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))

			// h.buckets = b
			bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
			nif.Body.Append(na)
			appendWalkStmt(init, nif)
		}
	}

	if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
		// Handling make(map[any]any) and
		// make(map[any]any, hint) where hint <= BUCKETSIZE
		// special allows for faster map initialization and
		// improves binary size by using calls with fewer arguments.
		// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
		// and no buckets will be allocated by makemap. Therefore,
		// no buckets need to be allocated in this code path.
		if n.Esc() == ir.EscNone {
			// Only need to initialize h.hash0 since
			// hmap h has been allocated on the stack already.
			// h.hash0 = fastrand()
			rand := mkcall("fastrand", types.Types[types.TUINT32], init)
			hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
			return typecheck.ConvNop(h, t)
		}
		// Call runtime.makehmap to allocate an
		// hmap on the heap and initialize hmap's hash0 field.
		fn := typecheck.LookupRuntime("makemap_small")
		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
		return mkcall1(fn, n.Type(), init)
	}

	if n.Esc() != ir.EscNone {
		// Escaping map: let makemap allocate the hmap itself.
		h = typecheck.NodNil()
	}
	// Map initialization with a variable or large hint is
	// more complicated. We therefore generate a call to
	// runtime.makemap to initialize hmap and allocate the
	// map buckets.

	// When hint fits into int, use makemap instead of
	// makemap64, which is faster and shorter on 32 bit platforms.
	fnname := "makemap64"
	argtype := types.Types[types.TINT64]

	// Type checking guarantees that TIDEAL hint is positive and fits in an int.
	// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
	// The case of hint overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makemap during runtime.
	if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "makemap"
		argtype = types.Types[types.TINT]
	}

	fn := typecheck.LookupRuntime(fnname)
	fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
	return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
}
362
// walkMakeSlice walks an OMAKESLICE node. A non-escaping make is
// rewritten as a stack array sliced to the requested length (with
// explicit len/cap panics); an escaping make becomes a call to
// runtime.makeslice or runtime.makeslice64.
func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	l := n.Len
	r := n.Cap
	if r == nil {
		// make([]T, l) — cap defaults to len; share one safe expression.
		r = safeExpr(l, init)
		l = r
	}
	t := n.Type()
	if t.Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
	}
	if n.Esc() == ir.EscNone {
		if why := escape.HeapAllocReason(n); why != "" {
			base.Fatalf("%v has EscNone, but %v", n, why)
		}
		// var arr [r]T
		// n = arr[:l]
		i := typecheck.IndexConst(r)
		if i < 0 {
			base.Fatalf("walkExpr: invalid index %v", r)
		}

		// cap is constrained to [0,2^31) or [0,2^63) depending on whether
		// we're in 32-bit or 64-bit systems. So it's safe to do:
		//
		// if uint64(len) > cap {
		//     if len < 0 { panicmakeslicelen() }
		//     panicmakeslicecap()
		// }
		nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil)
		niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil)
		niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
		nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
		init.Append(typecheck.Stmt(nif))

		t = types.NewArray(t.Elem(), i) // [r]T
		var_ := typecheck.Temp(t)
		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))  // zero temp
		r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l]
		// The conv is necessary in case n.Type is named.
		return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init)
	}

	// n escapes; set up a call to makeslice.
	// When len and cap can fit into int, use makeslice instead of
	// makeslice64, which is faster and shorter on 32 bit platforms.

	len, cap := l, r

	fnname := "makeslice64"
	argtype := types.Types[types.TINT64]

	// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
	// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makeslice during runtime.
	if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
		(cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
		fnname = "makeslice"
		argtype = types.Types[types.TINT]
	}
	fn := typecheck.LookupRuntime(fnname)
	ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
	ptr.MarkNonNil()
	len = typecheck.Conv(len, types.Types[types.TINT])
	cap = typecheck.Conv(cap, types.Types[types.TINT])
	sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, len, cap)
	return walkExpr(typecheck.Expr(sh), init)
}
432
// walkMakeSliceCopy walks an OMAKESLICECOPY node (a fused make+copy).
// Pointer-free elements with a known-equal length use
// mallocgc+memmove; everything else goes through runtime.makeslicecopy.
func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	if n.Esc() == ir.EscNone {
		base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
	}

	t := n.Type()
	if t.Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
	}

	// n.Len is the target length; n.Cap holds the source slice/string.
	length := typecheck.Conv(n.Len, types.Types[types.TINT])
	copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
	copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)

	if !t.Elem().HasPointers() && n.Bounded() {
		// When len(to)==len(from) and elements have no pointers:
		// replace make+copy with runtime.mallocgc+runtime.memmove.

		// We do not check for overflow of len(to)*elem.Width here
		// since len(from) is an existing checked slice capacity
		// with same elem.Width for the from slice.
		size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Size()), types.Types[types.TUINTPTR]))

		// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
		fn := typecheck.LookupRuntime("mallocgc")
		ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false))
		ptr.MarkNonNil()
		sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)

		s := typecheck.Temp(t)
		r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
		r = walkExpr(r, init)
		init.Append(r)

		// instantiate memmove(to *any, frm *any, size uintptr)
		fn = typecheck.LookupRuntime("memmove")
		fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
		ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
		init.Append(walkExpr(typecheck.Stmt(ncopy), init))

		return s
	}
	// Replace make+copy with runtime.makeslicecopy.
	// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
	fn := typecheck.LookupRuntime("makeslicecopy")
	ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
	ptr.MarkNonNil()
	sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
	return walkExpr(typecheck.Expr(sh), init)
}
484
485// walkNew walks an ONEW node.
486func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
487 t := n.Type().Elem()
488 if t.NotInHeap() {
489 base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
490 }
491 if n.Esc() == ir.EscNone {
492 if t.Size() > ir.MaxImplicitStackVarSize {
493 base.Fatalf("large ONEW with EscNone: %v", n)
494 }
495 return stackTempAddr(init, t)
496 }
497 types.CalcSize(t)
498 n.MarkNonNil()
499 return n
500}
501
// walkPrint generates code for OPRINT and OPRINTN (print/println)
// nodes: it normalizes the arguments, then emits a sequence of typed
// runtime print* calls bracketed by printlock/printunlock.
func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
	// Hoist all the argument evaluation up before the lock.
	walkExprListCheap(nn.Args, init)

	// For println, add " " between elements and "\n" at the end.
	if nn.Op() == ir.OPRINTN {
		s := nn.Args
		t := make([]ir.Node, 0, len(s)*2)
		for i, n := range s {
			if i != 0 {
				t = append(t, ir.NewString(" "))
			}
			t = append(t, n)
		}
		t = append(t, ir.NewString("\n"))
		nn.Args = t
	}

	// Collapse runs of constant strings.
	s := nn.Args
	t := make([]ir.Node, 0, len(s))
	for i := 0; i < len(s); {
		var strs []string
		for i < len(s) && ir.IsConst(s[i], constant.String) {
			strs = append(strs, ir.StringVal(s[i]))
			i++
		}
		if len(strs) > 0 {
			t = append(t, ir.NewString(strings.Join(strs, "")))
		}
		if i < len(s) {
			t = append(t, s[i])
			i++
		}
	}
	nn.Args = t

	calls := []ir.Node{mkcall("printlock", nil, init)}
	for i, n := range nn.Args {
		// Give untyped constants a concrete default type so a
		// print routine can be chosen for them.
		if n.Op() == ir.OLITERAL {
			if n.Type() == types.UntypedRune {
				n = typecheck.DefaultLit(n, types.RuneType)
			}

			switch n.Val().Kind() {
			case constant.Int:
				n = typecheck.DefaultLit(n, types.Types[types.TINT64])

			case constant.Float:
				n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
			}
		}

		if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
			n = typecheck.DefaultLit(n, types.Types[types.TINT64])
		}
		n = typecheck.DefaultLit(n, nil)
		nn.Args[i] = n
		if n.Type() == nil || n.Type().Kind() == types.TFORW {
			continue
		}

		// Select the runtime print routine matching the argument's kind.
		var on *ir.Name
		switch n.Type().Kind() {
		case types.TINTER:
			if n.Type().IsEmptyInterface() {
				on = typecheck.LookupRuntime("printeface")
			} else {
				on = typecheck.LookupRuntime("printiface")
			}
			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
		case types.TPTR:
			if n.Type().Elem().NotInHeap() {
				// Not-in-heap pointers cannot be passed as unsafe.Pointer
				// directly; print them as a uintptr value instead.
				on = typecheck.LookupRuntime("printuintptr")
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(types.Types[types.TUNSAFEPTR])
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(types.Types[types.TUINTPTR])
				break
			}
			fallthrough
		case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
			on = typecheck.LookupRuntime("printpointer")
			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
		case types.TSLICE:
			on = typecheck.LookupRuntime("printslice")
			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
		case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
			// runtime's "hex" type prints unsigned values in hexadecimal.
			if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
				on = typecheck.LookupRuntime("printhex")
			} else {
				on = typecheck.LookupRuntime("printuint")
			}
		case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
			on = typecheck.LookupRuntime("printint")
		case types.TFLOAT32, types.TFLOAT64:
			on = typecheck.LookupRuntime("printfloat")
		case types.TCOMPLEX64, types.TCOMPLEX128:
			on = typecheck.LookupRuntime("printcomplex")
		case types.TBOOL:
			on = typecheck.LookupRuntime("printbool")
		case types.TSTRING:
			// Constant " " and "\n" get dedicated, smaller routines.
			cs := ""
			if ir.IsConst(n, constant.String) {
				cs = ir.StringVal(n)
			}
			switch cs {
			case " ":
				on = typecheck.LookupRuntime("printsp")
			case "\n":
				on = typecheck.LookupRuntime("printnl")
			default:
				on = typecheck.LookupRuntime("printstring")
			}
		default:
			badtype(ir.OPRINT, n.Type(), nil)
			continue
		}

		r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
		if params := on.Type().Params().FieldSlice(); len(params) > 0 {
			t := params[0].Type
			n = typecheck.Conv(n, t)
			r.Args.Append(n)
		}
		calls = append(calls, r)
	}

	calls = append(calls, mkcall("printunlock", nil, init))

	typecheck.Stmts(calls)
	walkExprList(calls, init)

	r := ir.NewBlockStmt(base.Pos, nil)
	r.List = calls
	return walkStmt(typecheck.Stmt(r))
}
640
Dan Willemsenbc60c3c2021-12-15 01:09:00 -0800641// walkRecover walks an ORECOVERFP node.
642func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
643 return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init))
Dan Willemsencc753b72021-08-31 13:25:42 -0700644}
645
// walkUnsafeSlice lowers an unsafe.Slice(ptr, len) expression: it
// emits a call to runtime.unsafeslice{,64,checkptr} to validate ptr
// and len, then builds the resulting slice header from them.
func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
	ptr := safeExpr(n.X, init)
	len := safeExpr(n.Y, init)

	fnname := "unsafeslice64"
	lenType := types.Types[types.TINT64]

	// Type checking guarantees that a TIDEAL len is positive and fits in an int.
	// The case of len overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in unsafeslice during runtime.
	if ir.ShouldCheckPtr(ir.CurFunc, 1) {
		fnname = "unsafeslicecheckptr"
		// for simplicity, unsafeslicecheckptr always uses int64
	} else if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "unsafeslice"
		lenType = types.Types[types.TINT]
	}

	t := n.Type()

	// Call runtime.unsafeslice{,64,checkptr} to check ptr and len.
	fn := typecheck.LookupRuntime(fnname)
	init.Append(mkcall1(fn, nil, init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]), typecheck.Conv(len, lenType)))

	// Slice header with len == cap == the validated length.
	h := ir.NewSliceHeaderExpr(n.Pos(), t,
		typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
		typecheck.Conv(len, types.Types[types.TINT]),
		typecheck.Conv(len, types.Types[types.TINT]))
	return walkExpr(typecheck.Expr(h), init)
}
676
677func badtype(op ir.Op, tl, tr *types.Type) {
678 var s string
679 if tl != nil {
680 s += fmt.Sprintf("\n\t%v", tl)
681 }
682 if tr != nil {
683 s += fmt.Sprintf("\n\t%v", tr)
684 }
685
686 // common mistake: *struct and *interface.
687 if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
688 if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
689 s += "\n\t(*struct vs *interface)"
690 } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
691 s += "\n\t(*interface vs *struct)"
692 }
693 }
694
695 base.Errorf("illegal types for operand: %v%s", op, s)
696}
697
698func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
699 fn := typecheck.LookupRuntime(name)
700 fn = typecheck.SubstArgTypes(fn, l, r)
701 return fn
702}
703
704// isRuneCount reports whether n is of the form len([]rune(string)).
705// These are optimized into a call to runtime.countrunes.
706func isRuneCount(n ir.Node) bool {
707 return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
708}