Skip to content

Commit

Permalink
more f-string lint
Browse files Browse the repository at this point in the history
  • Loading branch information
minrk committed Dec 3, 2024
1 parent d3a8ab4 commit ba2af11
Show file tree
Hide file tree
Showing 21 changed files with 51 additions and 69 deletions.
2 changes: 1 addition & 1 deletion docs/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@
from datetime import date

project = 'ipyparallel'
copyright = '%04d, The IPython Development Team' % date.today().year
copyright = f'{date.today().year}, The IPython Development Team'
author = 'The IPython Development Team'

# The version info for the project you're documenting, acts as replacement for
Expand Down
6 changes: 3 additions & 3 deletions docs/source/examples/customresults.py
Original file line number Diff line number Diff line change
def sleep_here(count, t):
    """Sleep for `t` seconds on the engine, then return ``(count, t)``.

    NOTE(review): `id` here is presumably the engine id pushed into the
    engine's namespace by the example driver (it shadows the builtin) —
    confirm against the calling code.
    """
    # imports are inside the function so it is self-contained when
    # shipped to an engine
    import sys
    import time

    print(f"hi from engine {id}")
    # flush so the output is forwarded promptly, not buffered
    sys.stdout.flush()
    time.sleep(t)
    return count, t
Expand All @@ -52,12 +52,12 @@ def sleep_here(count, t):
for msg_id in finished:
# we know these are done, so don't worry about blocking
ar = rc.get_result(msg_id)
print("job id %s finished on engine %i" % (msg_id, ar.engine_id))
print(f"job id {msg_id} finished on engine {ar.engine_id}")
print("with stdout:")
print(' ' + ar.stdout.replace('\n', '\n ').rstrip())
print("and results:")

# note that each job in a map always returns a list of length chunksize
# even if chunksize == 1
for count, t in ar.get():
print(" item %i: slept for %.2fs" % (count, t))
print(f" item {count}: slept for {t:.2f}s")
6 changes: 3 additions & 3 deletions docs/source/examples/daVinci Word Count/pwordfreq.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,18 +68,18 @@ def pwordfreq(view, fnames):
block = nlines // n
for i in range(n):
chunk = lines[i * block : i * (block + 1)]
with open('davinci%i.txt' % i, 'w', encoding='utf8') as f:
with open(f'davinci{i}.txt', 'w', encoding='utf8') as f:
f.write('\n'.join(chunk))

try: # python2
cwd = os.path.abspath(os.getcwdu())
except AttributeError: # python3
cwd = os.path.abspath(os.getcwd())
fnames = [os.path.join(cwd, 'davinci%i.txt' % i) for i in range(n)]
fnames = [os.path.join(cwd, f'davinci{i}.txt') for i in range(n)]
tic = time.time()
pfreqs = pwordfreq(view, fnames)
toc = time.time()
print_wordfreq(freqs)
print("Took %.3f s to calculate on %i engines" % (toc - tic, len(view.targets)))
print(f"Took {toc - tic:.3f}s to calculate on {len(view.targets)} engines")
# cleanup split files
map(os.remove, fnames)
2 changes: 1 addition & 1 deletion docs/source/examples/dagdeps.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ def main(nodes, edges):

client = parallel.Client()
view = client.load_balanced_view()
print("submitting %i tasks with %i dependencies" % (nodes, edges))
print(f"submitting {nodes} tasks with {edges} dependencies")
results = submit_jobs(view, G, jobs)
print("waiting for results")
client.wait_interactive()
Expand Down
4 changes: 2 additions & 2 deletions docs/source/examples/interengine/communicator.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ def __init__(self, interface='tcp://*', identity=None):
# bind to ports
port = self.socket.bind_to_random_port(interface)
pub_port = self.pub.bind_to_random_port(interface)
self.url = interface + ":%i" % port
self.pub_url = interface + ":%i" % pub_port
self.url = f"{interface}:{port}"
self.pub_url = f"{interface}:{pub_port}"
# guess first public IP from socket
self.location = socket.gethostbyname_ex(socket.gethostname())[-1][0]
self.peers = {}
Expand Down
16 changes: 8 additions & 8 deletions docs/source/examples/itermapresult.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,10 @@
# create a Reference to `id`. This will be a different value on each engine
ref = ipp.Reference('id')
print("sleeping for `id` seconds on each engine")
tic = time.time()
tic = time.perf_counter()
ar = dv.apply(time.sleep, ref)
for i, r in enumerate(ar):
print("%i: %.3f" % (i, time.time() - tic))
print(f"{i}: {time.perf_counter() - tic:.3f}")


def sleep_here(t):
Expand All @@ -50,22 +50,22 @@ def sleep_here(t):
# one call per task
print("running with one call per task")
amr = v.map(sleep_here, [0.01 * t for t in range(100)])
tic = time.time()
tic = time.perf_counter()
for i, r in enumerate(amr):
print("task %i on engine %i: %.3f" % (i, r[0], time.time() - tic))
print(f"task {i} on engine {r[0]}: {time.perf_counter() - tic:.3f}")

print("running with four calls per task")
# with chunksize, we can have four calls per task
amr = v.map(sleep_here, [0.01 * t for t in range(100)], chunksize=4)
tic = time.time()
tic = time.perf_counter()
for i, r in enumerate(amr):
print("task %i on engine %i: %.3f" % (i, r[0], time.time() - tic))
print(f"task {i} on engine {r[0]}: {time.perf_counter() - tic:.3f}")

print("running with two calls per task, with unordered results")
# We can even iterate through faster results first, with ordered=False
amr = v.map(
sleep_here, [0.01 * t for t in range(100, 0, -1)], ordered=False, chunksize=2
)
tic = time.time()
tic = time.perf_counter()
for i, r in enumerate(amr):
print("slept %.2fs on engine %i: %.3f" % (r[1], r[0], time.time() - tic))
print(f"slept {r[1]:.2f}s on engine {r[0]}: {time.perf_counter() - tic:.3f}")
10 changes: 5 additions & 5 deletions docs/source/examples/pi/parallelpi.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@
import ipyparallel as ipp

# Files with digits of pi (10m digits each)
filestring = 'pi200m.ascii.%(i)02dof20'
files = [filestring % {'i': i} for i in range(1, 21)]
filestring = 'pi200m.ascii.{}of20'
files = [filestring.format(i) for i in range(1, 21)]

# Connect to the IPython cluster
c = ipp.Client()
Expand All @@ -42,7 +42,7 @@
v = c[:]
v.block = True
# fetch the pi-files
print("downloading %i files of pi" % n)
print(f"downloading {n} files of pi")
v.map(fetch_pi_file, files[:n]) # noqa: F821
print("done")

Expand All @@ -60,10 +60,10 @@
freqs150m = reduce_freqs(freqs_all)
t2 = clock()
digits_per_second8 = n * 10.0e6 / (t2 - t1)
print("Digits per second (%i engines, %i0m digits): " % (n, n), digits_per_second8)
print(f"Digits per second ({n} engines, {n}0m digits): ", digits_per_second8)

print("Speedup: ", digits_per_second8 / digits_per_second1)

plot_two_digit_freqs(freqs150m)
plt.title("2 digit sequences in %i0m digits of pi" % n)
plt.title(f"2 digit sequences in {n}0m digits of pi")
plt.show()
9 changes: 3 additions & 6 deletions docs/source/examples/task_profiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,10 +60,7 @@ def main():
]
stime = sum(times)

print(
"executing %i tasks, totalling %.1f secs on %i engines"
% (opts.n, stime, nengines)
)
print(f"executing {opts.n} tasks, totalling {stime:.1f} secs on {nengines} engines")
time.sleep(1)
start = time.perf_counter()
amr = view.map(time.sleep, times)
Expand All @@ -74,8 +71,8 @@ def main():
scale = stime / ptime

print(f"executed {stime:.1f} secs in {ptime:.1f} secs")
print("%.3fx parallel performance on %i engines" % (scale, nengines))
print("%.1f%% of theoretical max" % (100 * scale / nengines))
print(f"{scale:.3f}x parallel performance on {nengines} engines")
print(f"{scale / nengines:.1%} of theoretical max")


if __name__ == '__main__':
Expand Down
4 changes: 2 additions & 2 deletions docs/source/examples/wave2D/RectPartitioner.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def prepare_communication(self):

nsd_ = self.nsd
if nsd_ < 1:
print('Number of space dimensions is %d, nothing to do' % nsd_)
print(f'Number of space dimensions is {nsd_}, nothing to do')
return

self.subd_rank = [-1, -1, -1]
Expand Down Expand Up @@ -93,7 +93,7 @@ def prepare_communication(self):
self.subd_rank[1] = (my_id % offsets[2]) / self.num_parts[0]
self.subd_rank[2] = my_id / offsets[2]

print("my_id=%d, subd_rank: " % my_id, self.subd_rank)
print(f"my_id={my_id}, subd_rank={self.subd_rank}")
if my_id == 0:
print("offsets=", offsets)

Expand Down
4 changes: 2 additions & 2 deletions docs/source/examples/wave2D/communicator.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,8 @@ def __init__(self, interface='tcp://*', identity=None):
northport = self.north.bind_to_random_port(interface)
eastport = self.east.bind_to_random_port(interface)

self.north_url = interface + ":%i" % northport
self.east_url = interface + ":%i" % eastport
self.north_url = f"{interface}:{northport}"
self.east_url = f"{interface}:{eastport}"

# guess first public IP from socket
self.location = socket.gethostbyname_ex(socket.gethostname())[-1][0]
Expand Down
10 changes: 3 additions & 7 deletions docs/source/examples/wave2D/parallelwave-mpi.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,13 +119,9 @@ def wave_saver(u, x, y, t):
if partition is None:
partition = [1, num_procs]

assert partition[0] * partition[1] == num_procs, (
"can't map partition %s to %i engines"
% (
partition,
num_procs,
)
)
assert (
partition[0] * partition[1] == num_procs
), f"can't map partition {partition} to {num_procs} engines"

view = rc[:]
print(f"Running {grid} system on {partition} processes until {tstop:f}")
Expand Down
10 changes: 3 additions & 7 deletions docs/source/examples/wave2D/parallelwave.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,13 +126,9 @@ def wave_saver(u, x, y, t):
else:
num_procs = min(num_procs, partition[0] * partition[1])

assert partition[0] * partition[1] == num_procs, (
"can't map partition %s to %i engines"
% (
partition,
num_procs,
)
)
assert (
partition[0] * partition[1] == num_procs
), f"can't map partition {partition} to {num_procs} engines"

# construct the View:
view = rc[:num_procs]
Expand Down
9 changes: 1 addition & 8 deletions docs/source/examples/wave2D/wavesolver.py
Original file line number Diff line number Diff line change
Expand Up @@ -303,14 +303,7 @@ def solve(self, tstop, dt=-1, user_action=None, verbose=False, final_test=False)

t1 = time.time()
print(
'my_id=%2d, dt=%g, %s version, slice_copy=%s, net Wtime=%g'
% (
partitioner.my_id,
dt,
implementation['inner'],
partitioner.slice_copy,
t1 - t0,
)
f"my_id={partitioner.my_id:2}, dt={dt:g}, {implementation['inner']} version, slice_copy={partitioner.slice_copy}, net Wtime={t1 - t0:g}"
)
# save the us
self.us = u, u_1, u_2
Expand Down
2 changes: 1 addition & 1 deletion ipyparallel/client/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ def __repr__(self):
if len(text_out) > 32:
text_out = text_out[:29] + '...'

return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)
return f"<ExecuteReply[{self.execution_count}]: {text_out}>"

def _plaintext(self):
execute_result = self.metadata['execute_result'] or {'data': {}}
Expand Down
2 changes: 1 addition & 1 deletion ipyparallel/controller/hub.py
Original file line number Diff line number Diff line change
Expand Up @@ -1246,7 +1246,7 @@ def purge_results(self, client_id, msg):
for eid in eids:
if eid not in self.engines:
try:
raise IndexError("No such engine: %i" % eid)
raise IndexError(f"No such engine: {eid}")
except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
Expand Down
2 changes: 1 addition & 1 deletion ipyparallel/controller/sqlitedb.py
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,7 @@ def _init_db(self):
i = 0
while not self._check_table():
i += 1
self.table = first_table + '_%i' % i
self.table = f"{first_table}_{i}"
self.log.warning(
f"Table {previous_table} exists and doesn't match db format, trying {self.table}"
)
Expand Down
6 changes: 3 additions & 3 deletions ipyparallel/engine/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -612,7 +612,7 @@ async def complete_registration(self, msg, connect, maybe_tunnel):

def url(key):
    """Get the zmq url for the given channel name.

    Looks up the port for `key` in the connection-info dict and joins it
    to the shared interface, e.g. ``tcp://127.0.0.1:12345``.
    """
    # Bug fix: the f-string conversion looked up the literal string
    # 'key' (info['key']) instead of the `key` parameter (info[key]),
    # which the original %-format version used.
    return f"{info['interface']}:{info[key]}"

def urls(key):
    """Return a list of zmq urls for a channel whose info entry is a list of ports."""
    return [f'{info["interface"]}:{port}' for port in info[key]]
Expand Down Expand Up @@ -777,7 +777,7 @@ def send_with_metadata(
content['hb_period'],
identity,
)
self.log.info("Completed registration with id %i" % self.id)
self.log.info(f"Completed registration with id {self.id}")

def start_nanny(self, control_url):
self.log.info("Starting nanny")
Expand Down Expand Up @@ -809,7 +809,7 @@ def start_heartbeat(self, hb_ping, hb_pong, hb_period, identity):
self._hb_listener = zmqstream.ZMQStream(mon, self.loop)
self._hb_listener.on_recv(self._report_ping)

hb_monitor = "tcp://%s:%i" % (localhost(), mport)
hb_monitor = f"tcp://{localhost()}:{mport}"

heart = Heart(
hb_ping,
Expand Down
2 changes: 1 addition & 1 deletion ipyparallel/engine/log.py
Original file line number Diff line number Diff line change
def root_topic(self):
    """Logging topic root for this engine.

    This is a property, in case the handler is created before the
    engine gets registered with an id.
    """
    # Only use the id once it is a real int; before registration it
    # may be None or missing entirely.
    if isinstance(getattr(self.engine, 'id', None), int):
        return f"engine.{self.engine.id}"
    else:
        return "engine"
10 changes: 5 additions & 5 deletions ipyparallel/error.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,11 +161,11 @@ def __str__(self):
engine_str = self._get_engine_str(ei)
s = s + '\n' + engine_str + en + ': ' + str(ev)
if len(self.elist) > self.tb_limit:
s = s + '\n.... %i more exceptions ...' % (len(self.elist) - self.tb_limit)
s = s + f'\n.... {len(self.elist) - self.tb_limit} more exceptions ...'
return s

def __repr__(self):
    """Short repr showing only the number of collected remote exceptions."""
    # Diff-artifact fix: the obsolete %-format return preceded this
    # line, making the f-string version unreachable; keep only one.
    return f"CompositeError({len(self.elist)})"

def render_traceback(self, excid=None):
"""render one or all of my tracebacks to a list of lines"""
Expand All @@ -177,13 +177,13 @@ def render_traceback(self, excid=None):
lines.append('')
if len(self.elist) > self.tb_limit:
lines.append(
'... %i more exceptions ...' % (len(self.elist) - self.tb_limit)
f'... {len(self.elist) - self.tb_limit} more exceptions ...'
)
else:
try:
en, ev, etb, ei = self.elist[excid]
except Exception:
raise IndexError("an exception with index %i does not exist" % excid)
raise IndexError(f"an exception with index {excid} does not exist")
else:
lines.append(self._get_engine_str(ei) + ":")
lines.extend((etb or 'No traceback available').splitlines())
Expand All @@ -197,7 +197,7 @@ def raise_exception(self, excid=0):
try:
en, ev, etb, ei = self.elist[excid]
except Exception:
raise IndexError("an exception with index %i does not exist" % excid)
raise IndexError(f"an exception with index {excid} does not exist")
else:
raise RemoteError(en, ev, etb, ei)

Expand Down
2 changes: 1 addition & 1 deletion ipyparallel/tests/test_view.py
Original file line number Diff line number Diff line change
Expand Up @@ -553,7 +553,7 @@ def test_execute_reply(self):
e0.block = True
ar = e0.execute("5", silent=False)
er = ar.get()
assert str(er) == "<ExecuteReply[%i]: 5>" % er.execution_count
assert str(er) == f"<ExecuteReply[{er.execution_count}]: 5>"
assert er.execute_result['data']['text/plain'] == '5'

def test_execute_reply_rich(self):
Expand Down
2 changes: 1 addition & 1 deletion ipyparallel/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -364,7 +364,7 @@ def signal_children(children):

def terminate_children(sig, frame):
    """Signal handler: terminate every child process on a fatal signal."""
    log = get_logger()
    # lazy %-args form: logging interpolates only if the record is emitted
    log.critical("Got signal %i, terminating children...", sig)
    for child in children:
        child.terminate()

Expand Down

0 comments on commit ba2af11

Please sign in to comment.