- // Copyright 2014 Google Inc. All Rights Reserved.
- //
- // Licensed under the Apache License, Version 2.0 (the "License");
- // you may not use this file except in compliance with the License.
- // You may obtain a copy of the License at
- //
- // http://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS,
- // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- // See the License for the specific language governing permissions and
- // limitations under the License.
-
- package profile
-
- import (
- "fmt"
- "sort"
- "strconv"
- "strings"
- )
-
- // Compact performs garbage collection on a profile to remove any
- // unreferenced fields. This is useful to reduce the size of a profile
- // after samples or locations have been removed.
- func (p *Profile) Compact() *Profile {
- p, _ = Merge([]*Profile{p})
- return p
- }
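-
- // Illustrative sketch (not part of the package API): trimming samples with a
- // caller-supplied predicate leaves dangling locations, functions and mappings;
- // calling Compact afterwards rebuilds the profile so that only referenced
- // entities remain. The helper name and the keep predicate are hypothetical.
- func exampleCompact(p *Profile, keep func(*Sample) bool) *Profile {
- 	kept := p.Sample[:0]
- 	for _, s := range p.Sample {
- 		if keep(s) {
- 			kept = append(kept, s)
- 		}
- 	}
- 	p.Sample = kept
- 	// Compact re-merges the profile with itself, dropping unreferenced entities.
- 	return p.Compact()
- }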
-
- // Merge merges all the profiles in srcs into a single Profile.
- // It returns a new profile independent of the input profiles. The merged
- // profile is compacted to eliminate unused samples, locations,
- // functions and mappings. Profiles must have identical sample and
- // period types or the merge will fail. The Period of the resulting
- // profile will be the maximum of the input periods, and its TimeNanos
- // the earliest nonzero one among the inputs.
- func Merge(srcs []*Profile) (*Profile, error) {
- if len(srcs) == 0 {
- return nil, fmt.Errorf("no profiles to merge")
- }
- p, err := combineHeaders(srcs)
- if err != nil {
- return nil, err
- }
-
- pm := &profileMerger{
- p: p,
- samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
- locations: make(map[locationKey]*Location, len(srcs[0].Location)),
- functions: make(map[functionKey]*Function, len(srcs[0].Function)),
- mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
- }
-
- for _, src := range srcs {
- // Clear the profile-specific hash tables
- pm.locationsByID = make(map[uint64]*Location, len(src.Location))
- pm.functionsByID = make(map[uint64]*Function, len(src.Function))
- pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
-
- if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
- // The Mapping list has the property that the first mapping
- // represents the main binary. Take the first Mapping we see,
- // otherwise the operations below will add mappings in an
- // arbitrary order.
- pm.mapMapping(src.Mapping[0])
- }
-
- for _, s := range src.Sample {
- if !isZeroSample(s) {
- pm.mapSample(s)
- }
- }
- }
-
- for _, s := range p.Sample {
- if isZeroSample(s) {
- // If there are any zero samples, re-merge the profile to GC
- // them.
- return Merge([]*Profile{p})
- }
- }
-
- return p, nil
- }
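-
- // Illustrative sketch: merging two profiles collected from the same program.
- // Values of identical sample stacks are summed, the merged Period is the
- // maximum of the inputs and TimeNanos the earliest nonzero input timestamp,
- // as documented above. The function name and parameters are hypothetical.
- func exampleMerge(prof1, prof2 *Profile) (*Profile, error) {
- 	merged, err := Merge([]*Profile{prof1, prof2})
- 	if err != nil {
- 		return nil, err
- 	}
- 	return merged, nil
- }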
-
- func isZeroSample(s *Sample) bool {
- for _, v := range s.Value {
- if v != 0 {
- return false
- }
- }
- return true
- }
-
- // profileMerger carries the profile being built along with the memoization
- // tables used to deduplicate samples, locations, functions and mappings.
- type profileMerger struct {
- p *Profile
-
- // Memoization tables within a profile.
- locationsByID map[uint64]*Location
- functionsByID map[uint64]*Function
- mappingsByID map[uint64]mapInfo
-
- // Memoization tables for profile entities.
- samples map[sampleKey]*Sample
- locations map[locationKey]*Location
- functions map[functionKey]*Function
- mappings map[mappingKey]*Mapping
- }
-
- // mapInfo records the mapping an input mapping was merged into, together
- // with the address offset to add when remapping its locations.
- type mapInfo struct {
- m *Mapping
- offset int64
- }
-
- func (pm *profileMerger) mapSample(src *Sample) *Sample {
- s := &Sample{
- Location: make([]*Location, len(src.Location)),
- Value: make([]int64, len(src.Value)),
- Label: make(map[string][]string, len(src.Label)),
- NumLabel: make(map[string][]int64, len(src.NumLabel)),
- }
- for i, l := range src.Location {
- s.Location[i] = pm.mapLocation(l)
- }
- for k, v := range src.Label {
- vv := make([]string, len(v))
- copy(vv, v)
- s.Label[k] = vv
- }
- for k, v := range src.NumLabel {
- vv := make([]int64, len(v))
- copy(vv, v)
- s.NumLabel[k] = vv
- }
- // Check memoization table. Must be done on the remapped location to
- // account for the remapped mapping. Add current values to the
- // existing sample.
- k := s.key()
- if ss, ok := pm.samples[k]; ok {
- for i, v := range src.Value {
- ss.Value[i] += v
- }
- return ss
- }
- copy(s.Value, src.Value)
- pm.samples[k] = s
- pm.p.Sample = append(pm.p.Sample, s)
- return s
- }
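-
- // Illustrative sketch: mapping the same stack twice yields a single merged
- // sample whose values are summed by the memoization path above. The setup is
- // hypothetical and only large enough to exercise mapSample in isolation.
- func exampleSampleSummation() []int64 {
- 	pm := &profileMerger{
- 		p:             &Profile{},
- 		samples:       map[sampleKey]*Sample{},
- 		locations:     map[locationKey]*Location{},
- 		functions:     map[functionKey]*Function{},
- 		mappings:      map[mappingKey]*Mapping{},
- 		locationsByID: map[uint64]*Location{},
- 		functionsByID: map[uint64]*Function{},
- 		mappingsByID:  map[uint64]mapInfo{},
- 	}
- 	s := &Sample{Location: []*Location{{ID: 1}}, Value: []int64{5}}
- 	pm.mapSample(s)
- 	pm.mapSample(s)
- 	return pm.p.Sample[0].Value // []int64{10}
- }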
-
- // key generates sampleKey to be used as a key for maps.
- func (sample *Sample) key() sampleKey {
- ids := make([]string, len(sample.Location))
- for i, l := range sample.Location {
- ids[i] = strconv.FormatUint(l.ID, 16)
- }
-
- labels := make([]string, 0, len(sample.Label))
- for k, v := range sample.Label {
- labels = append(labels, fmt.Sprintf("%q%q", k, v))
- }
- sort.Strings(labels)
-
- numlabels := make([]string, 0, len(sample.NumLabel))
- for k, v := range sample.NumLabel {
- numlabels = append(numlabels, fmt.Sprintf("%q%x", k, v))
- }
- sort.Strings(numlabels)
-
- return sampleKey{
- strings.Join(ids, "|"),
- strings.Join(labels, ""),
- strings.Join(numlabels, ""),
- }
- }
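-
- // Illustrative sketch: the flattened key for a sample with two remapped
- // locations and one string label. Location IDs are hex encoded and joined
- // with "|", and each label is rendered with %q%q. The helper is hypothetical.
- func exampleSampleKey() sampleKey {
- 	s := &Sample{
- 		Location: []*Location{{ID: 0x1}, {ID: 0x2a}},
- 		Label:    map[string][]string{"tag": {"v"}},
- 	}
- 	// Resulting key: locations "1|2a", labels `"tag"["v"]`, numlabels "".
- 	return s.key()
- }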
-
- type sampleKey struct {
- locations string
- labels string
- numlabels string
- }
-
- func (pm *profileMerger) mapLocation(src *Location) *Location {
- if src == nil {
- return nil
- }
-
- if l, ok := pm.locationsByID[src.ID]; ok {
- pm.locationsByID[src.ID] = l
- return l
- }
-
- mi := pm.mapMapping(src.Mapping)
- l := &Location{
- ID: uint64(len(pm.p.Location) + 1),
- Mapping: mi.m,
- Address: uint64(int64(src.Address) + mi.offset),
- Line: make([]Line, len(src.Line)),
- }
- for i, ln := range src.Line {
- l.Line[i] = pm.mapLine(ln)
- }
- // Check memoization table. Must be done on the remapped location to
- // account for the remapped mapping ID.
- k := l.key()
- if ll, ok := pm.locations[k]; ok {
- pm.locationsByID[src.ID] = ll
- return ll
- }
- pm.locationsByID[src.ID] = l
- pm.locations[k] = l
- pm.p.Location = append(pm.p.Location, l)
- return l
- }
-
- // key generates locationKey to be used as a key for maps.
- func (l *Location) key() locationKey {
- key := locationKey{
- addr: l.Address,
- }
- if l.Mapping != nil {
- // Normalizes address to handle address space randomization.
- key.addr -= l.Mapping.Start
- key.mappingID = l.Mapping.ID
- }
- lines := make([]string, len(l.Line)*2)
- for i, line := range l.Line {
- if line.Function != nil {
- lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
- }
- lines[i*2+1] = strconv.FormatInt(line.Line, 16)
- }
- key.lines = strings.Join(lines, "|")
- return key
- }
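-
- // Illustrative sketch: because the key subtracts the mapping start, the same
- // instruction observed at two different ASLR base addresses yields the same
- // key, provided the (already remapped) mapping IDs match. Values are hypothetical.
- func exampleLocationKeyNormalization() bool {
- 	l1 := &Location{Mapping: &Mapping{ID: 1, Start: 0x400000}, Address: 0x400100}
- 	l2 := &Location{Mapping: &Mapping{ID: 1, Start: 0x500000}, Address: 0x500100}
- 	return l1.key() == l2.key() // true: both normalize to addr 0x100
- }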
-
- type locationKey struct {
- addr, mappingID uint64
- lines string
- }
-
- func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
- if src == nil {
- return mapInfo{}
- }
-
- if mi, ok := pm.mappingsByID[src.ID]; ok {
- return mi
- }
-
- // Check memoization tables.
- bk, pk := src.key()
- if src.BuildID != "" {
- if m, ok := pm.mappings[bk]; ok {
- mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
- pm.mappingsByID[src.ID] = mi
- return mi
- }
- }
- if src.File != "" {
- if m, ok := pm.mappings[pk]; ok {
- mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
- pm.mappingsByID[src.ID] = mi
- return mi
- }
- }
- m := &Mapping{
- ID: uint64(len(pm.p.Mapping) + 1),
- Start: src.Start,
- Limit: src.Limit,
- Offset: src.Offset,
- File: src.File,
- BuildID: src.BuildID,
- HasFunctions: src.HasFunctions,
- HasFilenames: src.HasFilenames,
- HasLineNumbers: src.HasLineNumbers,
- HasInlineFrames: src.HasInlineFrames,
- }
- pm.p.Mapping = append(pm.p.Mapping, m)
-
- // Update memoization tables.
- if m.BuildID != "" {
- pm.mappings[bk] = m
- }
- if m.File != "" {
- pm.mappings[pk] = m
- }
- mi := mapInfo{m, 0}
- pm.mappingsByID[src.ID] = mi
- return mi
- }
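-
- // Illustrative sketch: when a later profile maps the same binary (matched by
- // build ID or file path) at a different start address, the recorded offset
- // shifts its sample addresses into the merged mapping's range, mirroring what
- // mapLocation does. The concrete addresses are hypothetical.
- func exampleMappingOffset() int64 {
- 	merged := &Mapping{Start: 0x400000, Limit: 0x500000, BuildID: "abc"}
- 	src := &Mapping{Start: 0x500000, Limit: 0x600000, BuildID: "abc"}
- 	mi := mapInfo{m: merged, offset: int64(merged.Start) - int64(src.Start)}
- 	// A source address 0x500123 translates to 0x400123 in the merged mapping.
- 	return int64(0x500123) + mi.offset
- }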
-
- // key generates encoded strings of Mapping to be used as keys for maps.
- // Both keys include the mapping's rounded size and offset; the first key
- // additionally carries the build ID and the second the file path.
- func (m *Mapping) key() (buildIDKey, pathKey mappingKey) {
- // Normalize addresses to handle address space randomization.
- // Round up to next 4K boundary to avoid minor discrepancies.
- const mapsizeRounding = 0x1000
-
- size := m.Limit - m.Start
- size = size + mapsizeRounding - 1
- size = size - (size % mapsizeRounding)
-
- buildIDKey = mappingKey{
- size,
- m.Offset,
- m.BuildID,
- }
-
- pathKey = mappingKey{
- size,
- m.Offset,
- m.File,
- }
- return
- }
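-
- // Illustrative sketch of the rounding above: a mapping spanning 0x400000 to
- // 0x401234 has a raw size of 0x1234, which rounds up to 0x2000, so minor size
- // discrepancies between otherwise equal mappings do not change the key.
- // The concrete mapping is hypothetical.
- func exampleMappingKeyRounding() (buildIDKey, pathKey mappingKey) {
- 	m := &Mapping{Start: 0x400000, Limit: 0x401234, BuildID: "abc", File: "/bin/app"}
- 	// Both keys carry size 0x2000 and offset 0; they differ only in the
- 	// build ID versus file path component.
- 	return m.key()
- }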
-
- type mappingKey struct {
- size, offset uint64
- buildIDOrFile string
- }
-
- func (pm *profileMerger) mapLine(src Line) Line {
- ln := Line{
- Function: pm.mapFunction(src.Function),
- Line: src.Line,
- }
- return ln
- }
-
- func (pm *profileMerger) mapFunction(src *Function) *Function {
- if src == nil {
- return nil
- }
- if f, ok := pm.functionsByID[src.ID]; ok {
- return f
- }
- k := src.key()
- if f, ok := pm.functions[k]; ok {
- pm.functionsByID[src.ID] = f
- return f
- }
- f := &Function{
- ID: uint64(len(pm.p.Function) + 1),
- Name: src.Name,
- SystemName: src.SystemName,
- Filename: src.Filename,
- StartLine: src.StartLine,
- }
- pm.functions[k] = f
- pm.functionsByID[src.ID] = f
- pm.p.Function = append(pm.p.Function, f)
- return f
- }
-
- // key generates functionKey to be used as a key for maps.
- func (f *Function) key() functionKey {
- return functionKey{
- f.StartLine,
- f.Name,
- f.SystemName,
- f.Filename,
- }
- }
-
- type functionKey struct {
- startLine int64
- name, systemName, fileName string
- }
-
- // combineHeaders checks that all profiles can be merged and returns
- // their combined profile.
- func combineHeaders(srcs []*Profile) (*Profile, error) {
- for _, s := range srcs[1:] {
- if err := srcs[0].compatible(s); err != nil {
- return nil, err
- }
- }
-
- var timeNanos, durationNanos, period int64
- var comments []string
- var defaultSampleType string
- for _, s := range srcs {
- if timeNanos == 0 || s.TimeNanos < timeNanos {
- timeNanos = s.TimeNanos
- }
- durationNanos += s.DurationNanos
- if period == 0 || period < s.Period {
- period = s.Period
- }
- comments = append(comments, s.Comments...)
- if defaultSampleType == "" {
- defaultSampleType = s.DefaultSampleType
- }
- }
-
- p := &Profile{
- SampleType: make([]*ValueType, len(srcs[0].SampleType)),
-
- DropFrames: srcs[0].DropFrames,
- KeepFrames: srcs[0].KeepFrames,
-
- TimeNanos: timeNanos,
- DurationNanos: durationNanos,
- PeriodType: srcs[0].PeriodType,
- Period: period,
-
- Comments: comments,
- DefaultSampleType: defaultSampleType,
- }
- copy(p.SampleType, srcs[0].SampleType)
- return p, nil
- }
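-
- // Illustrative sketch: combining headers of two compatible inputs keeps the
- // earliest nonzero TimeNanos, sums DurationNanos and takes the largest Period.
- // The minimal profiles below are hypothetical.
- func exampleCombineHeaders() (*Profile, error) {
- 	cpu := &ValueType{Type: "cpu", Unit: "nanoseconds"}
- 	a := &Profile{PeriodType: cpu, Period: 10, TimeNanos: 100, DurationNanos: 5}
- 	b := &Profile{PeriodType: cpu, Period: 20, TimeNanos: 50, DurationNanos: 7}
- 	// Result: TimeNanos 50, DurationNanos 12, Period 20.
- 	return combineHeaders([]*Profile{a, b})
- }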
-
- // compatible determines if two profiles can be compared/merged.
- // It returns nil if the profiles are compatible; otherwise an error with
- // details on the incompatibility.
- func (p *Profile) compatible(pb *Profile) error {
- if !equalValueType(p.PeriodType, pb.PeriodType) {
- return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
- }
-
- if len(p.SampleType) != len(pb.SampleType) {
- return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
- }
-
- for i := range p.SampleType {
- if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
- return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
- }
- }
-
- return nil
- }
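-
- // Illustrative sketch: profiles whose period types disagree are rejected
- // before any merging takes place. The value types are hypothetical.
- func exampleIncompatiblePeriod() error {
- 	a := &Profile{PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}}
- 	b := &Profile{PeriodType: &ValueType{Type: "space", Unit: "bytes"}}
- 	return a.compatible(b) // non-nil: incompatible period types
- }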
-
- // equalValueType returns true if the two value types are semantically
- // equal. It ignores the internal fields used during encode/decode.
- func equalValueType(st1, st2 *ValueType) bool {
- return st1.Type == st2.Type && st1.Unit == st2.Unit
- }