Go code snippets

  • files
    • read gzipped file
      package main
      
      import (
          "compress/gzip"
          "fmt"
          "io/ioutil"
          "os"
      )
      
      func ReadGzippedFile(path string) ([]byte, error) {
          file, err := os.Open(path)
          if err != nil {
              return nil, err
          }
          defer file.Close()
          gzipReader, err := gzip.NewReader(file)
          if err != nil {
              return nil, err
          }
          defer gzipReader.Close()
          return ioutil.ReadAll(gzipReader)
      }
      
      func main() {
          fileName := "t.gz"
          if data, err := ReadGzippedFile(fileName); err != nil {
              fmt.Printf("ReadGzippedFile('%s') failed with '%s'\n", fileName, err.Error())
          } else {
              fmt.Printf("Uncompressed size of '%s' file: %d bytes\n", fileName, len(data))
          }
      }
    • io.ReadCloser for a gzipped file
      // GzippedReadCloser is an io.ReadCloser for a gzipped file
      type GzippedReadCloser struct {
      	f *os.File
      	r io.Reader
      }
      
      // Close closes the underlying file
      func (rc *GzippedReadCloser) Close() error {
      	return rc.f.Close()
      }
      
      // Read reads decompressed data from the gzip stream
      func (rc *GzippedReadCloser) Read(d []byte) (int, error) {
      	return rc.r.Read(d)
      }
      
      func openGzipped(path string) (io.ReadCloser, error) {
      	f, err := os.Open(path)
      	if err != nil {
      		return nil, err
      	}
      	r, err := gzip.NewReader(f)
      	if err != nil {
      		return nil, err
      	}
      	rc := &GzippedReadCloser{
      		f: f,
      		r: r,
      	}
      	return rc, nil
      }
      
      func readGzipped(path string) ([]byte, error) {
      	rc, err := openGzipped(path)
      	if err != nil {
      		return nil, err
      	}
      	defer rc.Close()
      	d, err := ioutil.ReadAll(rc)
      	if err != nil {
      		return nil, err
      	}
      	return d, nil
      }
    • read file line by line using scanner
      import (
          "bufio"
          "fmt"
          "os"
      )
      
      func ReadLines(filePath string) ([]string, error) {
          file, err := os.OpenFile(filePath, os.O_RDONLY, 0666)
          if err != nil {
              return nil, err
          }
          defer file.Close()
          scanner := bufio.NewScanner(file)
          res := make([]string, 0)
          for scanner.Scan() {
              line := scanner.Bytes()
              res = append(res, string(line))
          }
          if err = scanner.Err(); err != nil {
              return nil, err
          }
          return res, nil
      }
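      Note: bufio.Scanner limits a single line to 64 KB by default (bufio.MaxScanTokenSize) and reports bufio.ErrTooLong for anything longer. A hedged sketch of raising that limit with Scanner.Buffer, assuming lines up to 1 MB are acceptable:
      scanner := bufio.NewScanner(file)
      // start with a 64 KB buffer, allow tokens (lines) up to 1 MB
      scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)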
    • read file line by line with strings.Split()
      func readFileAsLines(path string) ([]string, error) {
      	d, err := ioutil.ReadFile(path)
      	if err != nil {
      		return nil, err
      	}
      	s := string(d)
      	res := strings.Split(s, "\n")
      	return res, nil
      }
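      Note that strings.Split keeps a trailing "\r" on Windows (CRLF) line endings and returns a final empty string when the file ends with a newline. A minimal sketch of handling both, assuming that's not wanted:
      s := strings.ReplaceAll(string(d), "\r\n", "\n")
      lines := strings.Split(s, "\n")
      // drop the empty element caused by a trailing newline
      if n := len(lines); n > 0 && lines[n-1] == "" {
      	lines = lines[:n-1]
      }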
    • copy file
      func mkdirForFile(filePath string) error {
      	dir := filepath.Dir(filePath)
      	return os.MkdirAll(dir, 0755)
      }
      
      func copyFile(dst string, src string) error {
      	err := mkdirForFile(dst)
      	if err != nil {
      		return err
      	}
      	fin, err := os.Open(src)
      	if err != nil {
      		return err
      	}
      	defer fin.Close()
      	fout, err := os.Create(dst)
      	if err != nil {
      		return err
      	}
      
      	_, err = io.Copy(fout, fin)
      	err2 := fout.Close()
      	if err != nil || err2 != nil {
      		// remove the partial/corrupt destination file
      		os.Remove(dst)
      	}
      	if err == nil {
      		// surface the Close() error if the copy itself succeeded
      		err = err2
      	}
      	return err
      }
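      A usage sketch with illustrative paths; thanks to mkdirForFile the destination directory doesn't have to exist:
      err := copyFile(filepath.Join("backup", "notes.txt"), "notes.txt")
      if err != nil {
      	log.Fatalf("copyFile() failed with '%s'\n", err)
      }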
    • combine.go
      package main
      
      import (
      	"bufio"
      	"io/ioutil"
      	"os"
      	"path/filepath"
      	"strings"
      )
      
      var (
      	destDir = "out"
      )
      
      func panicIfErr(err error) {
      	if err != nil {
      		panic(err.Error())
      	}
      }
      
      func getDirs() []string {
      	fileInfos, err := ioutil.ReadDir(".")
      	panicIfErr(err)
      	res := []string{}
      	for _, fi := range fileInfos {
      		if !fi.IsDir() {
      			continue
      		}
      		res = append(res, fi.Name())
      	}
      	return res
      }
      
      func mkdirForFile(filePath string) error {
      	dir := filepath.Dir(filePath)
      	return os.MkdirAll(dir, 0755)
      }
      
      func readLines(filePath string) ([]string, error) {
      	file, err := os.OpenFile(filePath, os.O_RDONLY, 0666)
      	if err != nil {
      		return nil, err
      	}
      	defer file.Close()
      	scanner := bufio.NewScanner(file)
      	res := make([]string, 0)
      	for scanner.Scan() {
      		line := scanner.Bytes()
      		res = append(res, string(line))
      	}
      	if err = scanner.Err(); err != nil {
      		return nil, err
      	}
      	return res, nil
      }
      
      func tweakAndCopyFile(dst string, src string) {
      	err := mkdirForFile(dst)
      	panicIfErr(err)
      
      	lines, err := readLines(src)
      	panicIfErr(err)
      	if len(lines) > 0 && lines[0] == "---" {
      		lines = lines[1:]
      	}
      
      	d := strings.Join(lines, "\n")
      	err = ioutil.WriteFile(dst, []byte(d), 0644)
      	panicIfErr(err)
      }
      
      func copyMdFilesFromDir(dir string) {
      	fileInfos, err := ioutil.ReadDir(dir)
      	panicIfErr(err)
      	parts := strings.Split(dir, "-")
      	prefix := parts[0]
      	for _, fi := range fileInfos {
      		if fi.IsDir() {
      			continue
      		}
      		name := fi.Name()
      		if !strings.HasSuffix(name, ".md") {
      			continue
      		}
      		src := filepath.Join(dir, name)
      		dstName := prefix + "-" + name
      		dst := filepath.Join(destDir, dstName)
      		tweakAndCopyFile(dst, src)
      	}
      }
      
      func main() {
      	dirs := getDirs()
      	for _, dir := range dirs {
      		copyMdFilesFromDir(dir)
      	}
      }
  • debounce
    // Debouncer runs a given function, debounced
    type Debouncer struct {
        currDebounceID *int32
    }
    
    // NewDebouncer creates a new Debouncer
    func NewDebouncer() *Debouncer {
        return &Debouncer{}
    }
    
    func (d *Debouncer) debounce(f func(), timeout time.Duration) {
        if d.currDebounceID != nil {
            // stop currently scheduled function
            v := atomic.AddInt32(d.currDebounceID, 1)
            d.currDebounceID = nil
            if v > 1 {
                // it was already executed
                return
            }
        }
    
        d.currDebounceID = new(int32)
        go func(f func(), timeout time.Duration, debounceID *int32) {
            // wait out the timeout, then run f unless this invocation was cancelled
            <-time.After(timeout)
            v := atomic.AddInt32(debounceID, 1)
            // if v != 1, it was cancelled
            if v == 1 {
                f()
            }
        }(f, timeout, d.currDebounceID)
    }
    
    var articleLoadDebouncer *Debouncer
    
    func reloadArticlesDelayed() {
        if articleLoadDebouncer == nil {
            articleLoadDebouncer = NewDebouncer()
        }
        articleLoadDebouncer.debounce(loadArticles, time.Second)
    }
  • download url
    func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(netw, addr string) (net.Conn, error) {
        return func(netw, addr string) (net.Conn, error) {
            conn, err := net.DialTimeout(netw, addr, cTimeout)
            if err != nil {
                return nil, err
            }
            conn.SetDeadline(time.Now().Add(rwTimeout))
            return conn, nil
        }
    }
    
    // NewTimeoutClient returns an *http.Client with explicit connect and read/write
    // timeouts. Create a new one for each Get() request: the read/write deadline is
    // absolute and is set when the connection is dialed.
    func NewTimeoutClient(connectTimeout time.Duration, readWriteTimeout time.Duration) *http.Client {
        return &http.Client{
            Transport: &http.Transport{
                Dial:  TimeoutDialer(connectTimeout, readWriteTimeout),
                Proxy: http.ProxyFromEnvironment,
            },
        }
    }
    
    func DownloadUrl(url string) ([]byte, error) {
        // the default http.Client has no timeout at all, so set explicit
        // connect and read/write timeouts
        timeoutClient := NewTimeoutClient(time.Second*120, time.Second*120)
        resp, err := timeoutClient.Get(url)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        if resp.StatusCode != 200 {
            return nil, fmt.Errorf("'%s': status code not 200 (%d)", url, resp.StatusCode)
        }
        return ioutil.ReadAll(resp.Body)
    }
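    A minimal usage sketch (the URL is just an example):
    func main() {
        d, err := DownloadUrl("https://example.com/")
        if err != nil {
            log.Fatalf("DownloadUrl() failed with '%s'\n", err)
        }
        fmt.Printf("downloaded %d bytes\n", len(d))
    }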
  • non-blocking send
    select {
    case ch <- value:
    default:
    }
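    A self-contained sketch of the pattern; trySend is an illustrative helper, not a standard function. The send succeeds while the buffered channel has room and is silently dropped once it's full:
    package main

    import "fmt"

    func trySend(ch chan int, v int) bool {
        select {
        case ch <- v:
            return true
        default:
            // channel is full (or unbuffered with no ready receiver): don't block
            return false
        }
    }

    func main() {
        ch := make(chan int, 1)
        fmt.Println(trySend(ch, 1)) // true: buffer has room
        fmt.Println(trySend(ch, 2)) // false: buffer is full
    }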
  • progress_estimator.go
    package util
    
    import (
    	"sync"
    	"time"
    )
    
    // ProgressEstimatorData contains fields readable after Next()
    type ProgressEstimatorData struct {
    	Total             int
    	Curr              int
    	Left              int
    	PercDone          float64 // 0...1
    	Skipped           int
    	TimeSoFar         time.Duration
    	EstimatedTimeLeft time.Duration
    }
    
    // ProgressEstimator is for estimating progress
    type ProgressEstimator struct {
    	timeStart time.Time
    	sync.Mutex
    	ProgressEstimatorData
    }
    
    // NewProgressEstimator creates a ProgressEstimator
    func NewProgressEstimator(total int) *ProgressEstimator {
    	d := ProgressEstimatorData{
    		Total: total,
    	}
    	return &ProgressEstimator{
    		ProgressEstimatorData: d,
    		timeStart:             time.Now(),
    	}
    }
    
    // next advances the estimator and returns a snapshot of its data
    func (pe *ProgressEstimator) next(isSkipped bool) ProgressEstimatorData {
    	pe.Lock()
    	defer pe.Unlock()
    	if isSkipped {
    		pe.Skipped++
    	}
    	pe.Curr++
    	pe.Left = pe.Total - pe.Curr
    	pe.TimeSoFar = time.Since(pe.timeStart)
    
    	realTotal := pe.Total - pe.Skipped
    	realCurr := pe.Curr - pe.Skipped
    	if realCurr == 0 || realTotal == 0 {
    		pe.EstimatedTimeLeft = pe.TimeSoFar
    	} else {
    		pe.PercDone = float64(realCurr) / float64(realTotal) // 0..1 range
    		realPerc := float64(realTotal) / float64(realCurr)
    		estimatedTotalTime := float64(pe.TimeSoFar) * realPerc
    		pe.EstimatedTimeLeft = time.Duration(estimatedTotalTime) - pe.TimeSoFar
    	}
    	cpy := pe.ProgressEstimatorData
    	return cpy
    }
    
    // Next advances estimator
    func (pe *ProgressEstimator) Next() ProgressEstimatorData {
    	return pe.next(false)
    }
    
    // Skip advances the estimator but marks this item as skipped (taking no time),
    // which gives better estimates for the remaining items
    func (pe *ProgressEstimator) Skip() ProgressEstimatorData {
    	return pe.next(true)
    }
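    A hedged usage sketch; files, shouldSkip and process are placeholders for whatever is being processed:
    pe := util.NewProgressEstimator(len(files))
    for _, f := range files {
    	if shouldSkip(f) {
    		pe.Skip()
    		continue
    	}
    	process(f)
    	d := pe.Next()
    	fmt.Printf("%d of %d done, estimated time left: %s\n", d.Curr, d.Total, d.EstimatedTimeLeft)
    }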
  • https server via Let's Encrypt
    package main
    
    import (
    	"crypto/tls"
    	"golang.org/x/crypto/acme/autocert"
    	"log"
    	"net"
    	"net/http"
    )
    
    var domain string = "example.com"
    
    func redirect(w http.ResponseWriter, req *http.Request) {
    
    	ip, _, _ := net.SplitHostPort(req.RemoteAddr)
    	target := "https://" + req.Host
    	agent := req.UserAgent()
    	log.Printf("Redirecting [%s] - [%s] to: %s", agent, ip, target)
    	http.Redirect(w, req, target, http.StatusTemporaryRedirect)
    
    }
    
    func main() {
    
    	staticDir := "static"
    	certCache := "tmp/certs"
    	TLSAddr := ":https"
    
    	// Use LE key & cert retrieved from tls.Config GetCertificate
    	certFile, keyFile := "", ""
    
    	certManager := autocert.Manager{
    		Prompt:     autocert.AcceptTOS,
    		HostPolicy: autocert.HostWhitelist(domain),
    		Cache:      autocert.DirCache(certCache),
    	}
    
    	SetHSTSHeader := func(h http.Handler) http.HandlerFunc {
    		return func(w http.ResponseWriter, r *http.Request) {
    			ip, _, _ := net.SplitHostPort(r.RemoteAddr)
    
    			l := "[" + r.UserAgent() + "] - [" + ip + "]"
    			log.Print(l)
    
    			headers := map[string]string{
    				"Strict-Transport-Security": "max-age=31557600; includeSubDomains",
    				"X-Content-Type-Options":    "nosniff",
    				"X-XSS-Protection":          "1; mode=block",
    				"Content-Security-Policy":   "default-src 'self'; script-src 'self'",
    				"X-Frame-Options":           "DENY",
    				"Referrer-Policy":           "no-referrer",
    				"Public-Key-Pins": 
    					"pin-sha256=\"uIgDNRW0N1ZqFBvx6qJWFqIlaR2rZH/Yr35ZNB+KdHE=\";" + // Site cert
    					"pin-sha256=\"BackupBackupBackupBackupBackupBackupBackups=\";" + // Site backup cert
    					"pin-sha256=\"YLh1dUR9y6Kja30RrAn7JKnbQG/uEtLMkBgFF2Fuihg=\";" + // LE X3 root CA
    					"pin-sha256=\"Vjs8r4z+80wjNcr1YKepWQboSIRi63WsWXhIMN+eWys=\";" + // LE DST X3 root CA
    					"includeSubdomains; max-age=2592000",
    			}
    			for k, v := range headers {
    				//fmt.Println(k, ":", v)
    				w.Header().Add(k, v)
    			}
    			h.ServeHTTP(w, r)
    		}
    	}
    
    	HTTPSServer := &http.Server{
    		Addr: TLSAddr,
    		TLSConfig: &tls.Config{
    			GetCertificate: certManager.GetCertificate,
    			MinVersion:     tls.VersionTLS10, // MaxVersion: tls.VersionTLS12,
    			CipherSuites: []uint16{
    				tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
    				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
    				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
    				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // Required by Go (and HTTP/2 RFC), even if you only present ECDSA certs
    				tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
    				/*
    					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
    					tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
    					tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
    					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
    					tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
    				*/
    			},
    		},
    	}
    
    	http.Handle("/", SetHSTSHeader(http.FileServer(http.Dir(staticDir))))
    
    	// HTTP (redirect) Listener
    	go http.ListenAndServe(":80", http.HandlerFunc(redirect))
    
    	// HTTPS Listener
    	err := HTTPSServer.ListenAndServeTLS(certFile, keyFile)
    
    	log.Fatal(err)
    }
  • open browser in mac/windows/linux
    func openBrowser(url string) {
    	var err error
    
    	switch runtime.GOOS {
    	case "linux":
    		err = exec.Command("xdg-open", url).Start()
    	case "windows":
    		err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
    	case "darwin":
    		err = exec.Command("open", url).Start()
    	default:
    		err = fmt.Errorf("unsupported platform")
    	}
    	if err != nil {
    		log.Fatal(err)
    	}
    
    }
  • set/get firestore document
    func panicIfErr(err error) {
    	if err != nil {
    		panic(err.Error())
    	}
    }
    
    type Foo struct {
    	Str     string   `json:"str"`
    	Str2    string   `json:"str-2" firestore:"str-2"`
    	StrArr  []string `json:"strarr"`
    	StrArr2 []string `json:"str-arr" firestore:"str-arr"`
    }
    
    func dbTestSetGet() {
    	db, ctx := getDB()
    	d := map[string]interface{}{
    		"str":     "string value str",
    		"str-2":   "string value str-2",
    		"strarr":  []string{"string", "array", "for", "strarr"},
    		"str-arr": []string{"string", "array", "for", "str-arr"},
    	}
    	_, err := db.Collection("foo").Doc("bar").Set(ctx, d)
    	panicIfErr(err)
    	doc, err := db.Collection("foo").Doc("bar").Get(ctx)
    	panicIfErr(err)
    	data := doc.Data()
    	fmt.Printf("Data: %#v\n", data)
    	var foo Foo
    	err = doc.DataTo(&foo)
    	panicIfErr(err)
    	fmt.Printf("foo: %#v\n", foo)
    }
  • normalizeNewlines
    func normalizeNewlines(d []byte) []byte {
    	// replace CR LF (windows) with LF (unix)
    	d = bytes.Replace(d, []byte{13, 10}, []byte{10}, -1)
    	// replace CR (mac) with LF (unix)
    	d = bytes.Replace(d, []byte{13}, []byte{10}, -1)
    	return d
    }
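    A quick check of what it does (13 is CR, 10 is LF):
    d := normalizeNewlines([]byte("a\r\nb\rc\n"))
    fmt.Printf("%q\n", d) // "a\nb\nc\n"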
  • bytesRemoveFirstLine
    // bytesRemoveFirstLine returns the first line of d and the remaining bytes
    func bytesRemoveFirstLine(d []byte) (string, []byte) {
    	idx := bytes.IndexByte(d, 10)
    	//u.PanicIf(-1 == idx)
    	if idx == -1 {
    		return string(d), nil
    	}
    	l := d[:idx]
    	return string(l), d[idx+1:]
    }
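    A quick usage sketch:
    first, rest := bytesRemoveFirstLine([]byte("line 1\nline 2\nline 3"))
    fmt.Printf("%q %q\n", first, rest) // "line 1" "line 2\nline 3"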
  • panicIf
    // FmtArgs formats args as a string. First argument should be format string
    // and the rest are arguments to the format
    func FmtArgs(args ...interface{}) string {
    	if len(args) == 0 {
    		return ""
    	}
    	format := args[0].(string)
    	if len(args) == 1 {
    		return format
    	}
    	return fmt.Sprintf(format, args[1:]...)
    }
    
    func panicWithMsg(defaultMsg string, args ...interface{}) {
    	s := FmtArgs(args...)
    	if s == "" {
    		s = defaultMsg
    	}
    	fmt.Printf("%s\n", s)
    	panic(s)
    }
    
    // PanicIf panics if cond is true
    func PanicIf(cond bool, args ...interface{}) {
    	if !cond {
    		return
    	}
    	panicWithMsg("PanicIf: condition failed", args...)
    }
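    A usage sketch, assuming err and path come from the surrounding code:
    d, err := ioutil.ReadFile(path)
    PanicIf(err != nil, "ioutil.ReadFile('%s') failed with '%s'", path, err)
    fmt.Printf("read %d bytes\n", len(d))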
  • remove duplicate strings from array
    // RemoveDuplicateStrings removes duplicate strings from an array of strings.
    // It's optimized for the case of no duplicates. It modifies a in place.
    func RemoveDuplicateStrings(a []string) []string {
        if len(a) < 2 {
            return a
        }
        sort.Strings(a)
        writeIdx := 1
        for i := 1; i < len(a); i++ {
            if a[i-1] == a[i] {
                continue
            }
            if writeIdx != i {
                a[writeIdx] = a[i]
            }
            writeIdx++
        }
        return a[:writeIdx]
    }
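    A quick sketch of the behavior (note that the slice is sorted as a side effect):
    a := []string{"go", "c", "go", "rust", "c"}
    a = RemoveDuplicateStrings(a)
    fmt.Println(a) // [c go rust]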
  • zip
    • unzip .zip file to a directory
      func recreateDir(dir string) error {
      	err := os.RemoveAll(dir)
      	if err != nil {
      		return err
      	}
      	return os.MkdirAll(dir, 0755)
      }
      
      func createDirForFile(path string) error {
      	dir := filepath.Dir(path)
      	return os.MkdirAll(dir, 0755)
      }
      
      func unzipFile(f *zip.File, dstPath string) error {
      	r, err := f.Open()
      	if err != nil {
      		return err
      	}
      	defer r.Close()
      
      	err = createDirForFile(dstPath)
      	if err != nil {
      		return err
      	}
      
      	w, err := os.Create(dstPath)
      	if err != nil {
      		return err
      	}
      	_, err = io.Copy(w, r)
      	if err != nil {
      		w.Close()
      		os.Remove(dstPath)
      		return err
      	}
      	err = w.Close()
      	if err != nil {
      		os.Remove(dstPath)
      		return err
      	}
      	return nil
      }
      
      func unzip(zipPath string, destDir string) error {
      	st, err := os.Stat(zipPath)
      	if err != nil {
      		return err
      	}
      	fileSize := st.Size()
      	f, err := os.Open(zipPath)
      	if err != nil {
      		return err
      	}
      	defer f.Close()
      
      	zr, err := zip.NewReader(f, fileSize)
      	if err != nil {
      		return err
      	}
      	err = recreateDir(destDir)
      	if err != nil {
      		return err
      	}
      
      	for _, fi := range zr.File {
      		if fi.FileInfo().IsDir() {
      			continue
      		}
      		destPath := filepath.Join(destDir, fi.Name)
      		err = unzipFile(fi, destPath)
      		if err != nil {
      			os.RemoveAll(destDir)
      			return err
      		}
      	}
      	return nil
      }
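      A minimal usage sketch (paths are illustrative). One caveat: fi.Name comes straight from the archive, so for untrusted .zip files consider rejecting entries whose cleaned path escapes destDir (the "Zip Slip" issue):
      err := unzip("t.zip", "t_out")
      if err != nil {
      	log.Fatalf("unzip() failed with '%s'\n", err)
      }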
