///Parameterize a benchmark
module testquark.parabench;
//An unfortunate requirement: user code needs the symbols from testquark.defines (e.g. NamedBenchmark).
public import testquark.defines;
import std.range;
import std.traits;
import std.typecons;
import std.json;
import std.stdio;
/** Define a way of providing testing data to a benchmark,
 * i.e. stop the compiler optimising the measured work away.
 * The data generator must be parameterized by a size_t so that the
 * benchmark can be measured across problem sizes.
 *
 * You provide the generating algorithm and a range of sizes to drive it with.
 */
struct ParameterizerStruct(Range, FuncType)
        if (isInputRange!Range && is(ElementType!Range : size_t) && (isCallable!FuncType
            && arity!FuncType == 1 && __traits(isSame, Parameters!FuncType[0], size_t)))
{
    NamedBenchmark loc;
    Range store;
    FuncType func;
    ///If you already have the location
    this(NamedBenchmark l, Range set, FuncType fSet)
    {
        loc = l;
        store = set;
        func = fSet;
    }
    ///Construct from a name; the location is captured from the call site
    this(string name, Range set, FuncType fSet, size_t iter = 1,
            string file = __FILE__, int line = __LINE__)
    {
        loc = NamedBenchmark(name, iter, line, file);
        store = set;
        func = fSet;
    }
}
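///A minimal sketch of constructing a ParameterizerStruct by hand (the
///Parameterizer helpers at the end of this module are the intended entry
///point). The generator `gen` and the sizes are hypothetical, and
///NamedBenchmark is assumed to take (name, iterations, line, file) as in
///those helpers.
unittest
{
    static int[] gen(size_t n)
    {
        import std.algorithm.iteration : map;
        import std.array : array;

        return iota(0, n).map!(x => cast(int) x).array;
    }

    auto sizes = iota(size_t(1), size_t(4));
    auto p = ParameterizerStruct!(typeof(sizes), typeof(&gen))(
            NamedBenchmark("example", 1, __LINE__, __FILE__), sizes, &gen);
    assert(p.store.front == 1);
    assert(p.func(3).length == 3);
}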
///Split up operation for exposing to afar
struct Operation
{
    //Size of the task
    size_t size;

    //This atomizes the actual task in hand so it can be timed/measured externally
    void delegate() runIt;
    invariant(runIt !is null);
}
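///A minimal sketch of driving a single Operation by hand, the way the
///runner below does; the size and the delegate here are hypothetical.
unittest
{
    import std.datetime.stopwatch : StopWatch;

    int calls;
    auto op = Operation(1024, () { ++calls; });
    auto sw = StopWatch(No.autoStart);
    sw.start();
    op.runIt();
    sw.stop();
    assert(calls == 1);
    assert(sw.peek.total!"usecs" >= 0);
}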
///Abstract range interface to a benchmark for some type erasure
interface BenchmarkPrototype : InputRange!(Operation)
{
    @property Operation front();
    void popFront();
    bool empty();
    size_t iterationsPerMeasurement() const;
}

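//A minimal sketch of consuming a BenchmarkPrototype from the outside: walk the
//Operations and time each one. runThem below is the real, more detailed
//version of this loop; this sketch is not compiled by default.
version (none) void sketchConsume(BenchmarkPrototype bench)
{
    import std.datetime.stopwatch : StopWatch;

    foreach (op; bench)
    {
        auto sw = StopWatch(No.autoStart);
        sw.start();
        op.runIt();
        sw.stop();
        writeln(op.size, " took ", sw.peek.total!"usecs", " usecs");
    }
}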
auto godawfulHack(RetType_, RangeType_, FuncType_, Operation_, alias Item_, I, J, K, L)(I x, J y, K z, L benchFn)
{
    return new class (x, y, z, benchFn) BenchmarkPrototype
    {
        RetType_ actOnThisData;
        RangeType_ theRange;
        FuncType_ theFunction;
        Operation_ current;
        NamedBenchmark loc;
        typeof(&Item_) benchFunc;

        @property Operation front()
        {
            return current;
        }

        size_t iterationsPerMeasurement() const
        {
            return loc.iterations;
        }

        //Regenerate the data for the current size so every Operation acts on fresh input
        void generateData()
        {
            current.runIt = &runTask;
            actOnThisData = theFunction(current.size);
        }

        void popFront()
        {
            theRange.popFront;

            if (!theRange.empty)
                current.size = theRange.moveFront;
            generateData();
        }

        void runTask()
        {
            benchFunc(actOnThisData);
        }

        bool empty()
        {
            return theRange.empty;
        }

        Operation moveFront()
        {
            return current;
        }

        int opApply(scope int delegate(Operation) it)
        {
            while (!empty)
            {
                if (auto result = it(this.current))
                    return result;
                if (!theRange.empty)
                    this.popFront();
            }
            return 0;
        }

        int opApply(scope int delegate(ulong, Operation))
        {
            assert(0, "nonsensical construct");
        }

        this(NamedBenchmark sLoc, RangeType_ set, FuncType_ task, typeof(&Item_) setB)
        {
            theFunction = task;
            theRange = set;
            loc = sLoc;
            benchFunc = setB;
            //Seed the first Operation from the front of the size range
            if (!theRange.empty)
                current.size = theRange.front;
            generateData;
        }
    };
}
auto MarkImpl(alias mod)(ref BenchmarkPrototype[NamedBenchmark] map)
{
    void register(NamedBenchmark key, BenchmarkPrototype set)
    {
        map[key] = set;
    }

    foreach (sItem; __traits(allMembers, mod))
    {
        alias Item = __traits(getMember, mod, sItem);
        const theAttributes = __traits(getAttributes, Item);
        //Only consider callable members that carry at least one attribute
        static if (theAttributes.length && isCallable!Item)
        {
            foreach (attr; theAttributes)
            {
                static if (is(typeof(attr) : Template!Args, alias Template, Args...))
                {
                    static if (__traits(isSame, Template, ParameterizerStruct))
                    {
                        alias RangeType = Args[0];
                        alias FuncType = Args[1];
                        alias RetType = ReturnType!FuncType;
                        static assert(__traits(isSame, RetType, Parameters!Item[0]),
                                "Function being benchmarked is not callable with the data provided by the parameterizer");
                        register(attr.loc, godawfulHack!(RetType, RangeType, FuncType,
                                Operation, Item, typeof(attr.loc), typeof(attr.store),
                                typeof(attr.func), typeof(&Item))(attr.loc, attr.store,
                                attr.func, &Item));
                    }
                }
            }
        }
    }
    return map;
}

pragma(lib, "papi");

auto runThem(Flag!"UsePapi" usePapi = Yes.UsePapi)(BenchmarkPrototype[NamedBenchmark] theBenchmarks, Flag!"FullData" fullDataView = No.FullData)
{
    static if (usePapi)
    {
        pragma(msg, "Link with PAPI 5 for this to work, pragma(lib, \"papi\") should work\nSet a very low perf_event_paranoid");
    }
    JSONValue runBenchmark(BenchmarkPrototype bench)
    in (bench !is null, "Null pointer")
    {
        import std.conv : to;

        ulong[string][string] data;
        foreach (pair; bench)
        {
            long totalMeasured = 0;
            foreach (i; 0 .. bench.iterationsPerMeasurement)
            {
                import std.datetime.stopwatch : StopWatch;

                static if (usePapi)
                {
                    import testquark.papi;
                    import testquark.papiStdEventDefs;

                    enum numCounters = 1;
                    long[numCounters] values;

                    int[numCounters] eventSet = [PAPI_TOT_INS];

                    if (PAPI_start_counters(eventSet.ptr, numCounters) != PAPI_OK)
                        assert(0, "PAPI initialization failed");

                    pair.runIt();

                    if (PAPI_stop_counters(values.ptr, numCounters) != PAPI_OK)
                        assert(0, "Failed stopping counters");

                    const long measured = values[0];
                }
                else
                {
                    auto sw = StopWatch(No.autoStart);
                    sw.start();
                    pair.runIt();
                    sw.stop();
                    const long measured = sw.peek.total!"usecs";
                }

                totalMeasured += measured;
                if (fullDataView)
                    data[pair.size.to!string][i.to!string] = measured;
            }
            static if (usePapi)
            {
                enum name = "meaninstructions";
            }
            else
            {
                enum name = "meanUsecs";
            }
            data[pair.size.to!string][name] = cast(ulong)(cast(float) totalMeasured / bench.iterationsPerMeasurement);
        }

        return JSONValue(data);
    }

    ulong index = 0;
    auto tmp = JSONValue(iota(index, theBenchmarks.length).array);
    foreach (NamedBenchmark key, value; theBenchmarks)
    {
        tmp[index] = JSONValue(["fullDefinition": key.toJson, "data": runBenchmark(value)]);
        index += 1;
    }
    return tmp;
}
/** Mix this in to gain the power of benchmarks; if set to automatic, they will run on module destruction.
 *
 * Params:
 *     expose = Expose to benchquark proper
 */
template BenchmarkInfrastructure(Flag!"Expose" expose = No.Expose, string M = __MODULE__)
{
    BenchmarkPrototype[NamedBenchmark] benchmarks;
    void setup()
    {
        benchmarks = MarkImpl!(mixin(M))(benchmarks);
    }

    void runAndPrint(Flag!"UsePapi" usePapi)()
    {
        setup();
        runThem!usePapi(benchmarks).toPrettyString.writeln;
    }
}

///Use these (unless you want to manually specify range types into ParameterizerStruct)
public auto Parameterizer(Range, FuncType)(string name, Range x, FuncType func,
        size_t iter = 1, string f = __FILE__, int l = __LINE__)
{
    return Parameterizer!(Range, FuncType)(NamedBenchmark(name, iter, l, f), x, func);
}
///Use these (unless you want to manually specify range types into ParameterizerStruct)
public auto Parameterizer(Range, FuncType)(NamedBenchmark loc, Range x, FuncType func)
{
    return ParameterizerStruct!(Range, FuncType)(loc, x, func);
}
///A simple benchmark involving specifying a sorting function
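///
///A minimal end-to-end sketch of the intended usage, as it would appear in a
///user module importing testquark.parabench. The names makeData and
///reverseSort are hypothetical; the generator builds the data, the annotated
///function is what gets measured. Not compiled by default.
version (none)
{
    import std.algorithm.iteration : map;
    import std.algorithm.sorting : sort;
    import std.array : array;
    import std.range : iota, retro;
    import std.typecons : No;

    //Data generator: takes the problem size and returns the data to sort.
    int[] makeData(size_t n)
    {
        return iota(0, n).retro.map!(x => cast(int) x).array;
    }

    //The benchmarked function; its parameter type must match makeData's
    //return type, which MarkImpl checks at compile time.
    @(Parameterizer("reverseSort", iota(size_t(1_000), size_t(5_000), size_t(1_000)), &makeData, 10))
    void reverseSort(int[] data)
    {
        data.sort();
    }

    mixin BenchmarkInfrastructure!();

    void main()
    {
        //Wall-clock timing; pass Yes.UsePapi to count instructions via PAPI instead.
        runAndPrint!(No.UsePapi)();
    }
}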