/*
 * Status and ETA code
 */
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#ifdef CONFIG_VALGRIND_DEV
#include <valgrind/drd.h>
#else
#define DRD_IGNORE_VAR(x) do { } while (0)
#endif

#include "fio.h"
#include "lib/pow2.h"

static char __run_str[REAL_MAX_JOBS + 1];
static char run_str[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS) + 1];

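/*
 * Compress the raw per-thread state string by run-length encoding it:
 * e.g. (a worked sketch) "RRRW" becomes "R(3),W(1)". The trailing comma
 * of the last group is trimmed before the string is terminated.
 */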
static void update_condensed_str(char *rstr, char *run_str_condensed)
{
	if (*rstr) {
		while (*rstr) {
			int nr = 1;

			*run_str_condensed++ = *rstr++;
			while (*(rstr - 1) == *rstr) {
				rstr++;
				nr++;
			}
			run_str_condensed += sprintf(run_str_condensed, "(%u),", nr);
		}
		run_str_condensed--;
	}
	*run_str_condensed = '\0';
}

/*
 * Sets the status of the 'td' in the printed status map.
 */
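/*
 * Character legend, as assigned below: P=not created, C=created,
 * I=initialized/setting up, p=pre-reading, /=ramp time, R/W/D=sequential
 * read/write/trim, r/w/d=random read/write/trim, M/m=mixed sequential/
 * random, V=verifying, F=fsyncing, f=finishing, E=exited, X=exited with
 * error, K=killed by signal, _=reaped.
 */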
static void check_str_update(struct thread_data *td)
{
	char c = __run_str[td->thread_number - 1];

	switch (td->runstate) {
	case TD_REAPED:
		if (td->error)
			c = 'X';
		else if (td->sig)
			c = 'K';
		else
			c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RAMP:
		c = '/';
		break;
	case TD_RUNNING:
		if (td_rw(td)) {
			if (td_random(td)) {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'r';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'w';
				else
					c = 'm';
			} else {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'R';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'W';
				else
					c = 'M';
			}
		} else if (td_read(td)) {
			if (td_random(td))
				c = 'r';
			else
				c = 'R';
		} else if (td_write(td)) {
			if (td_random(td))
				c = 'w';
			else
				c = 'W';
		} else {
			if (td_random(td))
				c = 'd';
			else
				c = 'D';
		}
		break;
	case TD_PRE_READING:
		c = 'p';
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_FSYNCING:
		c = 'F';
		break;
	case TD_FINISHING:
		c = 'f';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_INITIALIZED:
	case TD_SETTING_UP:
		c = 'I';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		log_err("state %d\n", td->runstate);
	}

	__run_str[td->thread_number - 1] = c;
	update_condensed_str(__run_str, run_str);
}

/*
 * Convert seconds to a printable string.
 */
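/*
 * For example, eta_sec = 93784 renders as "01d:02h:03m:04s"; the day
 * and hour fields are only emitted when needed. The caller must supply
 * a buffer large enough for the longest form.
 */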
void eta_to_str(char *str, unsigned long eta_sec)
{
	unsigned int d, h, m, s;
	int disp_hour = 0;

	if (eta_sec == -1) {
		sprintf(str, "--");
		return;
	}

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d) {
		disp_hour = 1;
		str += sprintf(str, "%02ud:", d);
	}

	if (h || disp_hour)
		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	sprintf(str, "%02us", s);
}

/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static unsigned long thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;
	uint64_t timeout;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->flags & TD_F_NO_PROGRESS)
		return -1;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

	/*
	 * If io_size is set, bytes_total is an exact value that does not need
	 * adjustment.
	 */
	if (td->o.zone_size && td->o.zone_skip && bytes_total &&
	    !fio_option_is_set(&td->o, io_size)) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		/*
		 * Calculate the upper bound of the number of zones that will
		 * be processed, including skipped bytes between zones. If this
		 * is larger than total_io_size (e.g. when --io_size or --size
		 * specify a small value), use the lower bound to avoid
		 * adjustments to a negative value that would result in a very
		 * large bytes_total and an incorrect eta.
		 */
		zone_bytes = td->o.zone_size + td->o.zone_skip;
		nr_zones = (bytes_total + zone_bytes - 1) / zone_bytes;
		if (bytes_total < nr_zones * td->o.zone_skip)
			nr_zones = bytes_total / zone_bytes;
		bytes_total -= nr_zones * td->o.zone_skip;
	}
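	/*
	 * Worked example for the adjustment above, with hypothetical
	 * numbers: zone_size = zone_skip = 64M and bytes_total = 1G give
	 * zone_bytes = 128M and nr_zones = 8, so bytes_total drops to
	 * 512M, the bytes that will actually be transferred.
	 */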

	/*
	 * if writing and verifying afterwards, bytes_total will be twice the
	 * size. In a mixed workload, verify phase will be the size of the
	 * first stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td)) {
			unsigned int perc = 50;

			if (td->o.rwmix[DDIR_WRITE])
				perc = td->o.rwmix[DDIR_WRITE];

			bytes_total += (bytes_total * perc) / 100;
		} else {
			bytes_total <<= 1;
		}
	}

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = ddir_rw_sum(td->io_bytes);

		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else {
			perc = 0.0;
		}

		if (td->o.time_based) {
			if (timeout) {
				perc_t = (double) elapsed / (double) timeout;
				if (perc_t < perc)
					perc = perc_t;
			} else {
				/*
				 * Will never hit, we can't have time_based
				 * without a timeout set.
				 */
				perc = 0.0;
			}
		}

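		/*
		 * Linear extrapolation: if fraction perc of the work took
		 * 'elapsed' seconds, the whole job takes roughly
		 * elapsed / perc. E.g. (hypothetical) elapsed = 30s at
		 * perc = 0.25 projects a 120s total, leaving a 90s eta.
		 */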
		if (perc == 0.0) {
			eta_sec = timeout;
		} else {
			eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;
		}

		if (td->o.timeout &&
		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int64_t t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			uint64_t __timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = __timeout + start_delay;
			if (!td->ramp_time_over) {
				t_eta += ramp_time;
			}
			t_eta /= 1000000ULL;

			if ((td->runstate == TD_RAMP) && in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
		rate_bytes = 0;
		if (td_read(td))
			rate_bytes = td->o.rate[DDIR_READ];
		if (td_write(td))
			rate_bytes += td->o.rate[DDIR_WRITE];
		if (td_trim(td))
			rate_bytes += td->o.rate[DDIR_TRIM];

		if (rate_bytes) {
			r_eta = bytes_total / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}

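/*
 * Bandwidth over the last sampling window, per data direction. E.g.
 * (hypothetical numbers) 10 MiB moved in 500 msecs yields
 * (1000 * 10485760 / 500) / 1024 = 20480 KiB/s.
 */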
static void calc_rate(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, uint64_t *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_rate;

		diff = io_bytes[i] - prev_io_bytes[i];
		if (mtime)
			this_rate = ((1000 * diff) / mtime) / 1024; /* KiB/s */
		else
			this_rate = 0;

		if (unified_rw_rep == UNIFIED_MIXED) {
			rate[i] = 0;
			rate[0] += this_rate;
		} else
			rate[i] = this_rate;

		prev_io_bytes[i] = io_bytes[i];
	}
}

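/*
 * Same windowed scheme for IOPS: completions since the previous sample,
 * scaled to a per-second rate (diff * 1000 / mtime).
 */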
static void calc_iops(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_iops;

		diff = io_iops[i] - prev_io_iops[i];
		if (mtime)
			this_iops = (diff * 1000) / mtime;
		else
			this_iops = 0;

		if (unified_rw_rep == UNIFIED_MIXED) {
			iops[i] = 0;
			iops[0] += this_iops;
		} else
			iops[i] = this_iops;

		prev_io_iops[i] = io_iops[i];
	}
}

/*
 * Allow a little slack - if we're within 95% of the time, allow ETA.
 */
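/*
 * E.g. with a typical 1000 msec eta interval, an update arriving after
 * more than 950 msecs counts as on time.
 */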
bool eta_time_within_slack(unsigned int time)
{
	return time > ((eta_interval_msec * 95) / 100);
}

/*
 * These are the conditions under which we might be able to skip the eta
 * calculation.
 */
static bool skip_eta(void)
{
	if (!(output_format & FIO_OUTPUT_NORMAL) && f_out == stdout)
		return true;
	if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
		return true;
	if (!isatty(STDOUT_FILENO) && eta_print != FIO_ETA_ALWAYS)
		return true;

	return false;
}

/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
static bool calc_thread_status(struct jobs_eta *je, int force)
{
	int unified_rw_rep;
	bool any_td_in_ramp;
	uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT] = {};
	unsigned long long io_iops[DDIR_RWDIR_CNT] = {};
	struct timespec now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timespec rate_prev_time, disp_prev_time;

	bool ret = true;

	if (!force && skip_eta()) {
		if (write_bw_log)
			ret = false;
		else
			return false;
	}
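	/*
	 * Note: when the eta would otherwise be skipped, we still fall
	 * through if bandwidth logging is on, since the aggregate rate
	 * samples below must keep flowing; ret = false just suppresses
	 * the display.
	 */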

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = calloc(thread_number, sizeof(uint64_t));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	bw_avg_time = ULONG_MAX;
	unified_rw_rep = 0;
	for_each_td(td) {
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		je->unit_base = td->o.unit_base;
		je->sig_figs = td->o.sig_figs;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING
		    || td->runstate == TD_FINISHING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP)
			je->nr_setting_up++;
		else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[__td_index] = thread_eta(td);
		else
			eta_secs[__td_index] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			int ddir;

			for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
				} else {
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];
				}
			}
		}
	} end_for_each();

	if (exitall_on_terminate) {
		je->eta_sec = INT_MAX;
		for_each_td_index() {
			if (eta_secs[__td_index] < je->eta_sec)
				je->eta_sec = eta_secs[__td_index];
		} end_for_each();
	} else {
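		/*
		 * With exitall the run ends when the first job finishes,
		 * hence the min above. Otherwise stonewalled jobs run back
		 * to back and their etas accumulate, while parallel jobs
		 * are dominated by the slowest one.
		 */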
		unsigned long eta_stone = 0;

		je->eta_sec = 0;
		for_each_td(td) {
			if ((td->runstate == TD_NOT_CREATED) && td->o.stonewall)
				eta_stone += eta_secs[__td_index];
			else {
				if (eta_secs[__td_index] > je->eta_sec)
					je->eta_sec = eta_secs[__td_index];
			}
		} end_for_each();
		je->eta_sec += eta_stone;
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	any_td_in_ramp = false;
	for_each_td(td) {
		any_td_in_ramp |= in_ramp_time(td);
	} end_for_each();
	if (write_bw_log && rate_time > bw_avg_time && !any_td_in_ramp) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
				je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		regrow_agg_logs();
		for_each_rw_ddir(ddir) {
			add_agg_sample(sample_val(je->rate[ddir]), ddir, 0);
		}
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	if (!force && !eta_time_within_slack(disp_time))
		return false;

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return false;

	je->nr_threads = thread_number;
	update_condensed_str(__run_str, run_str);
	memcpy(je->run_str, run_str, strlen(run_str));
	return ret;
}

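/*
 * Build the "[r=...,w=...][r=...,w=... IOPS]" middle of the status line,
 * listing only the directions with activity. A read-only job, for
 * example, renders as "[r=20.0MiB/s][r=5120 IOPS]" (hypothetical
 * numbers).
 */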
static int gen_eta_str(struct jobs_eta *je, char *p, size_t left,
		       char **rate_str, char **iops_str)
{
	static const char c[DDIR_RWDIR_CNT] = {'r', 'w', 't'};
	bool has[DDIR_RWDIR_CNT];
	bool has_any = false;
	const char *sep;
	int l = 0;

	for_each_rw_ddir(ddir) {
		has[ddir] = (je->rate[ddir] || je->iops[ddir]);
		has_any |= has[ddir];
	}
	if (!has_any)
		return 0;

	l += snprintf(p + l, left - l, "[");
	sep = "";
	for_each_rw_ddir(ddir) {
		if (has[ddir]) {
			l += snprintf(p + l, left - l, "%s%c=%s",
					sep, c[ddir], rate_str[ddir]);
			sep = ",";
		}
	}
	l += snprintf(p + l, left - l, "][");
	sep = "";
	for_each_rw_ddir(ddir) {
		if (has[ddir]) {
			l += snprintf(p + l, left - l, "%s%c=%s",
					sep, c[ddir], iops_str[ddir]);
			sep = ",";
		}
	}
	l += snprintf(p + l, left - l, " IOPS]");

	return l;
}

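/*
 * Render a single status line to stdout, e.g. (hypothetical values):
 *
 *   Jobs: 2 (f=2): [R(1),W(1)][12.3%][r=20.0MiB/s,w=20.0MiB/s][r=5120,w=5120 IOPS][eta 01m:30s]
 *
 * The line ends with \r instead of \n so the next update overwrites it
 * in place; shorter lines are padded with spaces to blank out leftovers
 * from a longer previous line.
 */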
void display_thread_status(struct jobs_eta *je)
{
	static struct timespec disp_eta_new_line;
	static int eta_new_line_init, eta_new_line_pending;
	static int linelen_last;
	static int eta_good;
	char output[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS) + 512], *p = output;
	char eta_str[128];
	double perc = 0.0;

	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);
	}

	if (eta_new_line_pending) {
		eta_new_line_pending = 0;
		linelen_last = 0;
		p += sprintf(p, "\n");
	}

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);

	/* rate limits, if any */
	if (je->m_rate[0] || je->m_rate[1] || je->m_rate[2] ||
	    je->t_rate[0] || je->t_rate[1] || je->t_rate[2]) {
		char *tr, *mr;

		mr = num2str(je->m_rate[0] + je->m_rate[1] + je->m_rate[2],
				je->sig_figs, 1, je->is_pow2, N2S_BYTEPERSEC);
		tr = num2str(je->t_rate[0] + je->t_rate[1] + je->t_rate[2],
				je->sig_figs, 1, je->is_pow2, N2S_BYTEPERSEC);

		p += sprintf(p, ", %s-%s", mr, tr);
		free(tr);
		free(mr);
	} else if (je->m_iops[0] || je->m_iops[1] || je->m_iops[2] ||
		   je->t_iops[0] || je->t_iops[1] || je->t_iops[2]) {
		p += sprintf(p, ", %d-%d IOPS",
				je->m_iops[0] + je->m_iops[1] + je->m_iops[2],
				je->t_iops[0] + je->t_iops[1] + je->t_iops[2]);
	}

	/* current run string, % done, bandwidth, iops, eta */
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char perc_str[32];
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];
		size_t left;
		int l;
		int ddir;
		int linelen;

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running ||
		    je->eta_sec == -1)
			strcpy(perc_str, "-.-%");
		else {
			double mult = 100.0;

			if (je->nr_setting_up && je->nr_running)
				mult *= (1.0 - (double) je->nr_setting_up / (double) je->nr_running);

			eta_good = 1;
			perc *= mult;
			sprintf(perc_str, "%3.1f%%", perc);
		}

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 4,
						1024, je->is_pow2, je->unit_base);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, N2S_NONE);
		}

		left = sizeof(output) - (p - output) - 1;
		l = snprintf(p, left, ": [%s][%s]", je->run_str, perc_str);
		l += gen_eta_str(je, p + l, left - l, rate_str, iops_str);
		l += snprintf(p + l, left - l, "[eta %s]", eta_str);

		/* If truncation occurred adjust l so p is on the null */
		if (l >= left)
			l = left - 1;
		p += l;
		linelen = p - output;
		if (l >= 0 && linelen < linelen_last)
			p += sprintf(p, "%*s", linelen_last - linelen, "");
		linelen_last = linelen;

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);
		}
	}
	sprintf(p, "\r");

	printf("%s", output);

	if (!eta_new_line_init) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_init = 1;
	} else if (eta_new_line && mtime_since_now(&disp_eta_new_line) > eta_new_line) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_pending = 1;
	}

	fflush(stdout);
}

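/*
 * The returned jobs_eta is heap allocated and must be free()d by the
 * caller; NULL means there is nothing to report. Minimal usage sketch
 * (this is exactly what print_thread_status() below does):
 *
 *	size_t size;
 *	struct jobs_eta *je = get_jobs_eta(false, &size);
 *
 *	if (je) {
 *		display_thread_status(je);
 *		free(je);
 *	}
 */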
struct jobs_eta *get_jobs_eta(bool force, size_t *size)
{
	struct jobs_eta *je;

	if (!thread_number)
		return NULL;

	*size = sizeof(*je) + THREAD_RUNSTR_SZ + 8;
	je = calloc(1, *size);
	if (!je)
		return NULL;

	if (!calc_thread_status(je, force)) {
		free(je);
		return NULL;
	}

	*size = sizeof(*je) + strlen((char *) je->run_str) + 1;
	return je;
}

void print_thread_status(void)
{
	struct jobs_eta *je;
	size_t size;

	je = get_jobs_eta(false, &size);
	if (je) {
		display_thread_status(je);
		free(je);
	}
}

void print_status_init(int thr_number)
{
	struct jobs_eta_packed jep;

	compiletime_assert(sizeof(struct jobs_eta) == sizeof(jep), "jobs_eta");

	DRD_IGNORE_VAR(__run_str);
	__run_str[thr_number] = 'P';
	update_condensed_str(__run_str, run_str);
}