+import re, time
+
+
+def _raw_write_bwlimit(f, buf, bwcount, bwtime):
+    # bwlimit is a module-level setting: the allowed bandwidth in bytes
+    # per second, or a falsy value for unlimited.
+    if not bwlimit:
+        f.write(buf)
+        return (len(buf), time.time())
+    else:
+        # We want to write in reasonably large blocks, but not so large that
+        # they're likely to overflow a router's queue.  So our bwlimit timing
+        # has to be pretty granular.  Also, if it takes too long from one
+        # transmit to the next, we can't just make up for lost time to bring
+        # the average back up to bwlimit - that will risk overflowing the
+        # outbound queue, which defeats the purpose.  So if we fall behind
+        # by more than one block delay, we shouldn't ever try to catch up.
+        for i in range(0, len(buf), 4096):
+            now = time.time()
+            # Earliest moment the previous block's size lets us send again.
+            next_time = max(now, bwtime + 1.0*bwcount/bwlimit)
+            time.sleep(next_time - now)
+            sub = buf[i:i+4096]
+            f.write(sub)
+            bwcount = len(sub)  # might be less than 4096
+            bwtime = next_time
+        return (bwcount, bwtime)
+
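+
+# Illustrative sketch, not from the original patch: a caller threads the
+# returned (bwcount, bwtime) pair back into the next call so the limiter
+# can pace each block against the one before it.  The helper name and its
+# arguments (outf, blocks) are hypothetical.
+def _example_paced_writes(outf, blocks):
+    bwcount, bwtime = 0, time.time()
+    for block in blocks:
+        bwcount, bwtime = _raw_write_bwlimit(outf, block, bwcount, bwtime)
+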
+
+_protocol_rs = br'([a-z]+)://'
+_host_rs = br'(?P<sb>\[)?((?(sb)[0-9a-f:]+|[^:/]+))(?(sb)\])'
+_port_rs = br'(?::(\d+))?'
+_path_rs = br'(/.*)?'
+_url_rx = re.compile(br'%s(?:%s%s)?%s' % (_protocol_rs, _host_rs, _port_rs, _path_rs),
+                     re.I)
+
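+# A quick sanity sketch (added for illustration, not in the original):
+# group 1 is the protocol, group 3 the host, group 4 the port, and group
+# 5 the path; group 2 only captures the '[' that brackets an IPv6 host.
+# The function name _example_url_groups is hypothetical.
+def _example_url_groups():
+    m = _url_rx.match(b'bup://[::1]:1234/repo')
+    assert m.group(1, 3, 4, 5) == (b'bup', b'::1', b'1234', b'/repo')
+    m = _url_rx.match(b'file:///some/where')
+    assert m.group(1, 3, 4, 5) == (b'file', None, None, b'/some/where')
+
+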
+def parse_remote(remote):
+    url_match = _url_rx.match(remote)
+    if url_match:
+        # Reject anything but the protocols we know how to speak.
+        if url_match.group(1) not in (b'ssh', b'bup', b'file'):
+            raise ClientError('unexpected protocol: %s'
+                              % url_match.group(1).decode('ascii'))
+        # (protocol, host, port, path); see the groups in _url_rx above.
+        return url_match.group(1, 3, 4, 5)
+    else:
+        # scp-style "host:path"; an empty or "-" host means a local path.
+        rs = remote.split(b':', 1)
+        if len(rs) == 1 or rs[0] in (b'', b'-'):
+            return b'file', None, None, rs[-1]
+        else:
+            return b'ssh', rs[0], None, rs[1]
+
+
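+# Illustrative expectations, sketched here rather than taken from the
+# original patch: URL-style and scp-style remotes reduce to the same
+# (protocol, host, port, path) tuple.  _example_parse_remote is a
+# hypothetical name.
+def _example_parse_remote():
+    assert parse_remote(b'ssh://host:22/repo') == (b'ssh', b'host', b'22', b'/repo')
+    assert parse_remote(b'host:repo') == (b'ssh', b'host', None, b'repo')
+    assert parse_remote(b'/local/repo') == (b'file', None, None, b'/local/repo')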