Feature/cc/do not write image (#108)

* Remove excessive memory usage during image processing

* Do not copy the image to local disk. Saves SSD wear and speeds up startup
This commit is contained in:
Alex Goodman 2018-11-29 18:08:40 -05:00 committed by GitHub
parent 3edff1760d
commit 3320d2df68
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@@ -2,14 +2,9 @@ package image
import (
"archive/tar"
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
@@ -126,11 +121,11 @@ func NewImageConfig(configBytes []byte) ImageConfig {
return imageConfig
}
func processLayerTar(line *jotframe.Line, layerMap map[string]*filetree.FileTree, name string, tarredBytes []byte) {
func processLayerTar(line *jotframe.Line, layerMap map[string]*filetree.FileTree, name string, reader *tar.Reader) {
tree := filetree.NewFileTree()
tree.Name = name
fileInfos := getFileList(tarredBytes)
fileInfos := getFileList(reader)
shortName := name[:15]
pb := NewProgressBar(int64(len(fileInfos)))
@@ -167,26 +162,9 @@ func InitializeData(imageID string) ([]*Layer, []*filetree.FileTree, float64, fi
utils.RunDockerCmd("pull", imageID)
}
// save this image to disk temporarily to get the content info
imageTarPath, tmpDir := saveImage(imageID)
// fmt.Println(imageTarPath)
// fmt.Println(tmpDir)
// imageTarPath := "/tmp/dive280665036/image.tar"
defer os.RemoveAll(tmpDir)
// read through the image contents and build a tree
tarFile, err := os.Open(imageTarPath)
if err != nil {
fmt.Println(err)
utils.Exit(1)
}
tarFile, totalSize := getImageReader(imageID)
defer tarFile.Close()
fi, err := tarFile.Stat()
if err != nil {
logrus.Panic(err)
}
totalSize := fi.Size()
var observedBytes int64
var percent int
@@ -231,14 +209,8 @@ func InitializeData(imageID string) ([]*Layer, []*filetree.FileTree, float64, fi
shortName := name[:15]
io.WriteString(line, " ├─ "+shortName+" : loading...")
var tarredBytes = make([]byte, header.Size)
_, err = tarReader.Read(tarredBytes)
if err != nil && err != io.EOF {
logrus.Panic(err)
}
go processLayerTar(line, layerMap, name, tarredBytes)
layerReader := tar.NewReader(tarReader)
processLayerTar(line, layerMap, name, layerReader)
} else if strings.HasSuffix(name, ".json") {
var fileBuffer = make([]byte, header.Size)
n, err = tarReader.Read(fileBuffer)
@@ -297,7 +269,7 @@ func InitializeData(imageID string) ([]*Layer, []*filetree.FileTree, float64, fi
return layers, trees, efficiency, inefficiencies
}
func saveImage(imageID string) (string, string) {
func getImageReader(imageID string) (io.ReadCloser, int64) {
ctx := context.Background()
dockerClient, err := client.NewClientWithOpts(client.WithVersion(dockerVersion), client.FromEnv)
if err != nil {
@@ -320,70 +292,14 @@ func saveImage(imageID string) (string, string) {
readCloser, err := dockerClient.ImageSave(ctx, []string{imageID})
check(err)
defer readCloser.Close()
tmpDir, err := ioutil.TempDir("", "dive")
check(err)
cleanUpTmp := func() {
os.RemoveAll(tmpDir)
}
imageTarPath := filepath.Join(tmpDir, "image.tar")
imageFile, err := os.Create(imageTarPath)
check(err)
defer func() {
if err := imageFile.Close(); err != nil {
cleanUpTmp()
logrus.Panic(err)
}
}()
imageWriter := bufio.NewWriter(imageFile)
pb := NewProgressBar(totalSize)
var observedBytes int64
buf := make([]byte, 1024)
for {
n, err := readCloser.Read(buf)
if err != nil && err != io.EOF {
cleanUpTmp()
logrus.Panic(err)
}
if n == 0 {
break
}
observedBytes += int64(n)
if pb.Update(observedBytes) {
io.WriteString(line, fmt.Sprintf(" Fetching image... %s", pb.String()))
}
if _, err := imageWriter.Write(buf[:n]); err != nil {
cleanUpTmp()
logrus.Panic(err)
}
}
if err = imageWriter.Flush(); err != nil {
cleanUpTmp()
logrus.Panic(err)
}
pb.Done()
io.WriteString(line, fmt.Sprintf(" Fetching image... %s", pb.String()))
frame.Close()
return imageTarPath, tmpDir
return readCloser, totalSize
}
func getFileList(tarredBytes []byte) []filetree.FileInfo {
func getFileList(tarReader *tar.Reader) []filetree.FileInfo {
var files []filetree.FileInfo
reader := bytes.NewReader(tarredBytes)
tarReader := tar.NewReader(reader)
for {
header, err := tarReader.Next()