Commit: Add multiple urls cli argument

bjarneo committed Mar 2, 2022
1 parent 7544a96 commit deacf8c

Showing 6 changed files with 72 additions and 15 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -13,3 +13,5 @@

# Dependency directories (remove the comment below to include it)
# vendor/

urls.txt
22 changes: 20 additions & 2 deletions README.md
@@ -10,18 +10,30 @@ This is an HTTP load testing tool that runs requests concurrently. Written as a Go
- Set a timer in second for how long it should run
- Outputs table of statistics for the end result
- Log the requests to $HOME/rip.log
- Supports multiple URLs

## Coming

- JSON output of the result
- Support multiple URLs

## Usage

Install the binary from <https://github.com/bjarneo/rip/releases>, or skip ahead to the "build the binary manually" step.

```bash
$ rip -c 100 -t 10 https://your.domain.com
# Standard usage with a single URL
rip -c 100 -t 10 https://your.domain.com

# Multiple URLs
touch urls.txt

# Add the URLs; each one must be on its own line
http://localhost:5000
http://localhost:5000/dis-is-nice
http://localhost:5000/yas

# RIP
rip -t 10 -u urls.txt
```

### The default values
@@ -34,6 +46,8 @@ Usage of rip
How many concurrent users to simulate (default: 10)
-l bool
Log the requests to $HOME/rip.log (default: false)
-u string
A file of URLs. Each URL should be on a new line. It will randomly choose a URL.
```

@@ -65,6 +79,10 @@ If you get this error message `socket: too many open files`, you might want to i
ulimit -n 12000
```

## Information

Use this tool at your own risk. Do not use it maliciously.

## LICENSE

See [LICENSE](./LICENSE)
5 changes: 2 additions & 3 deletions gui/statistics.go
@@ -7,14 +7,13 @@ import (
"github.com/pterm/pterm"
)

func PrintTable(stats statistics.Statistics, url string) {
func PrintTable(stats statistics.Statistics) {
fmt.Println()

pterm.DefaultTable.WithHasHeader().WithData(
pterm.TableData{
{"URL", "Total", "Successful", "Failed", "Longest", "Shortest", "Elapsed Time", "Avg Response Time", "Data transferred"},
{"Total", "Successful", "Failed", "Longest", "Shortest", "Elapsed Time", "Avg Response Time", "Data transferred"},
{
url,
fmt.Sprintf("%d", stats.Total()),
fmt.Sprintf("%d", stats.Successful()),
fmt.Sprintf("%d", stats.Failure()),
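For orientation, here is a minimal, self-contained sketch of the trimmed table now that the URL column is gone (a single load test may span several URLs). The row values are invented placeholders standing in for the real statistics getters:

```go
package main

import "github.com/pterm/pterm"

func main() {
	// Placeholder values stand in for stats.Total(), stats.Successful(), etc.
	pterm.DefaultTable.WithHasHeader().WithData(
		pterm.TableData{
			{"Total", "Successful", "Failed", "Longest", "Shortest", "Elapsed Time", "Avg Response Time", "Data transferred"},
			{"1000", "987", "13", "1.2s", "35ms", "10s", "120ms", "4.2 MB"},
		},
	).Render()
}
```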
15 changes: 9 additions & 6 deletions main.go
@@ -3,6 +3,7 @@ package main
import (
"fmt"
"io"
"math/rand"
"net/http"
"sync"
"time"
@@ -29,11 +30,13 @@ var logToFile = utils.Logger()
try to set ulimit to a higher number.
$ ulimit -n 12000
*/
func request(url string) bool {
func request(urls []string) bool {
start := utils.NowUnixMilli()

stats.SetTotal(1)

url := urls[rand.Intn(len(urls))]

resp, err := http.Get(url)

if err != nil {
@@ -70,12 +73,12 @@ func request(url string) bool {
return true
}

func workers(concurrent int, interval int, url string) {
func workers(concurrent int, interval int, urls []string) {
// Let us start the timer for how long the workers are running
start := utils.NowUnixMilli()
end := utils.FutureUnixMilli(interval)

spinner, _ := pterm.DefaultSpinner.Start(fmt.Sprintf("Load testing %s", url))
spinner, _ := pterm.DefaultSpinner.Start("Ongoing load testing...")

var wg sync.WaitGroup

@@ -86,7 +89,7 @@ func workers(concurrent int, interval int, url string) {
// run the concurrent go routines
go func() {
for {
request(url)
request(urls)
}
}()
}
@@ -122,7 +125,7 @@

func main() {
// Run until the interval is done
workers(args.Concurrent(), args.Interval(), args.Url())
workers(args.Concurrent(), args.Interval(), args.Urls())

gui.PrintTable(stats, args.Url())
gui.PrintTable(stats)
}
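The core of the change is the selection step in request(): every call draws a uniformly random URL from the slice, so over many requests the load spreads roughly evenly across all entries. A minimal standalone sketch of that behaviour (not the tool itself; the URL list is made up):

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickURL mirrors the line this commit adds to request():
// url := urls[rand.Intn(len(urls))]
func pickURL(urls []string) string {
	return urls[rand.Intn(len(urls))]
}

func main() {
	urls := []string{
		"http://localhost:5000",
		"http://localhost:5000/dis-is-nice",
		"http://localhost:5000/yas",
	}

	// Tally 9000 draws; each URL should land near 3000 hits.
	counts := make(map[string]int)
	for i := 0; i < 9000; i++ {
		counts[pickURL(urls)]++
	}

	fmt.Println(counts)
}
```

Note that math/rand is deterministic unless seeded, which is fine here: the point is spreading load across URLs, not unpredictability.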
18 changes: 14 additions & 4 deletions utils/args.go
@@ -11,21 +11,23 @@ type Arguments struct {
interval *int
logger *bool
url *string
urls *string
}

func Args() Arguments {
flags := Arguments{
concurrent: flag.Int("c", 10, "How many concurrent users to simulate"),
interval: flag.Int("t", 60, "How many seconds to run the test"),
logger: flag.Bool("l", false, "Log the requests to $HOME/rip.log"),
urls: flag.String("u", "", "A file of URLs. Each URL should be on a new line. It will randomly choose a URL."),
}

flag.Parse()

// The URL you want to load test
url := flag.Arg(0)
if url == "" {
fmt.Print("No URL provided. Example: $ rip https://www.google.com")
if url == "" && *flags.urls == "" {
fmt.Print("No URL provided. Example: $ rip https://www.google.com, or $ rip -u urls.txt.")

os.Exit(1)
}
@@ -47,6 +49,14 @@ func (flags *Arguments) Logger() bool {
return *flags.logger
}

func (flags *Arguments) Url() string {
return *flags.url
func (flags *Arguments) Urls() []string {
if *flags.urls != "" {
return FileURL(*flags.urls)
}

url := make([]string, 1)

url[0] = *flags.url

return url
}
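The resolution order in Urls() is simple: the -u file wins when set, otherwise the positional URL is wrapped in a one-element slice. A hypothetical condensed form of the same logic (resolveURLs is not part of the commit; it also shows the idiomatic slice literal that replaces the make-and-index pair above):

```go
package utils

// resolveURLs is a hypothetical distillation of Arguments.Urls().
// fileFlag corresponds to -u, positional to flag.Arg(0).
func resolveURLs(fileFlag, positional string) []string {
	if fileFlag != "" {
		return FileURL(fileFlag) // defined in utils/init.go below
	}
	return []string{positional} // equivalent to make([]string, 1) plus index assignment
}
```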
25 changes: 25 additions & 0 deletions utils/init.go
@@ -1,6 +1,8 @@
package utils

import (
"os"
"strings"
"time"
)

@@ -15,3 +17,26 @@ func FutureUnixMilli(interval int) int64 {

return future.UnixMilli()
}

// copied and pasted from stackoverflow because I am lazy
func deleteEmptyFromSlice(s []string) []string {
var r []string
for _, str := range s {
if str != "" {
r = append(r, str)
}
}
return r
}

func FileURL(urls string) []string {
data, err := os.ReadFile(urls)

if err != nil {
panic(err)
}

urlsToSlice := deleteEmptyFromSlice(strings.Split(strings.ReplaceAll(string(data), "\r\n", "\n"), "\n"))

return urlsToSlice
}
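A hypothetical usage sketch for FileURL, assuming the urls.txt from the README and the module path github.com/bjarneo/rip (not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/bjarneo/rip/utils" // assumed import path
)

func main() {
	// FileURL normalizes CRLF line endings, splits on newlines, drops
	// blank lines, and panics if the file cannot be read.
	for _, url := range utils.FileURL("urls.txt") {
		fmt.Println(url)
	}
}
```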
