1 import re, struct, errno, time, zlib
2 from bup import git, ssh
3 from bup.helpers import *
# Exception type raised throughout this module for client-side failures
# (connection errors, protocol violations, unexpected server exits).
# NOTE(review): the class body (original lines 9-11) is not visible in
# this sampled listing.
8 class ClientError(Exception):
# Write `buf` to file object `f` while throttling to a bandwidth cap
# (presumably a module/global `bwlimit`; its definition is not visible
# here -- TODO confirm).  Returns the updated accounting pair
# (bwcount, bwtime) for the caller to thread into the next call.
# NOTE(review): original lines 13-14, 16, 25, 27-29 and 31 are missing
# from this view (including the unthrottled fast path's condition and
# the actual write/sleep statements), so comments below are hedged.
12 def _raw_write_bwlimit(f, buf, bwcount, bwtime):
# Presumably the no-bwlimit fast path: write everything at once and
# restart the clock -- verify against the missing guard on line ~13.
15 return (len(buf), time.time())
17 # We want to write in reasonably large blocks, but not so large that
18 # they're likely to overflow a router's queue. So our bwlimit timing
19 # has to be pretty granular. Also, if it takes too long from one
20 # transmit to the next, we can't just make up for lost time to bring
21 # the average back up to bwlimit - that will risk overflowing the
22 # outbound queue, which defeats the purpose. So if we fall behind
23 # by more than one block delay, we shouldn't ever try to catch up.
24 for i in xrange(0,len(buf),4096):
# Earliest moment the next 4096-byte chunk may be sent without
# exceeding bwlimit, given that `bwcount` bytes went out at `bwtime`.
26 next = max(now, bwtime + 1.0*bwcount/bwlimit)
30 bwcount = len(sub) # might be less than 4096
32 return (bwcount, bwtime)
# Parse a remote repository spec into a (protocol, host, port, path)
# 4-tuple.  Accepts URL forms like ssh://host:port/path, bup://... and
# file://..., plus the scp-style shorthand "host:path".
# NOTE(review): original lines 38-40, 42, 45 and 49 are missing from
# this view (the `port`/`path` regex parts and the re.match call's
# first half), so the regex assembly below is only partially visible.
35 def parse_remote(remote):
36 protocol = r'([a-z]+)://'
# Hostname, or a bracketed IPv6 literal ([...]); the (?(sb)...) regex
# conditionals key off whether the opening bracket group matched.
37 host = r'(?P<sb>\[)?((?(sb)[0-9a-f:]+|[^:/]+))(?(sb)\])'
41 '%s(?:%s%s)?%s' % (protocol, host, port, path), remote, re.I)
# Only these three URL schemes are supported.
43 assert(url_match.group(1) in ('ssh', 'bup', 'file'))
# Groups 1,3,4,5 = (protocol, host, port, path).
44 return url_match.group(1,3,4,5)
# Fallback for non-URL specs: split "host:path" at the first colon.
46 rs = remote.split(':', 1)
# A bare path, empty host, or '-' means a local (file) repository.
47 if len(rs) == 1 or rs[0] in ('', '-'):
48 return 'file', None, None, rs[-1]
50 return 'ssh', rs[0], None, rs[1]
# Connect to the remote bup server described by `remote` (see
# parse_remote) over one of three transports: inherited fds (reverse
# mode), an ssh/local subprocess, or a TCP socket ('bup' protocol).
# NOTE(review): this is a method of a class whose `class` line
# (original ~53) is not visible here; original lines 59-60, 66, 70,
# 72, 78, 85, 87 and 89 are also missing, so the branch structure
# around the transport setup is only partially visible.
54 def __init__(self, remote, create=False, compression_level=1):
55 self._busy = self.conn = None
56 self.sock = self.p = self.pout = self.pin = None
57 self.compression_level = compression_level
# Reverse mode: the server initiated the connection to us.
58 is_reverse = os.environ.get('BUP_SERVER_REVERSE')
61 remote = '%s:' % is_reverse
62 (self.protocol, self.host, self.port, self.dir) = parse_remote(remote)
# Local cache of the server's pack indexes, keyed by host:dir with
# unsafe characters replaced so it forms a single path component.
63 self.cachedir = git.repo('index-cache/%s'
64 % re.sub(r'[^@\w]', '_',
65 "%s:%s" % (self.host, self.dir)))
# Reverse mode (presumably): talk over inherited fds 3 (read) and
# 4 (write) -- the guarding condition on line ~66 is not visible.
67 self.pout = os.fdopen(3, 'rb')
68 self.pin = os.fdopen(4, 'wb')
69 self.conn = Conn(self.pout, self.pin)
71 if self.protocol in ('ssh', 'file'):
73 # FIXME: ssh and file shouldn't use the same module
# Spawn `bup server` via ssh (or locally for 'file').
74 self.p = ssh.connect(self.host, self.port, 'server')
75 self.pout = self.p.stdout
76 self.pin = self.p.stdin
77 self.conn = Conn(self.pout, self.pin)
# Python 2 three-argument raise: re-raise as ClientError while
# preserving the original traceback.
79 raise ClientError, 'connect: %s' % e, sys.exc_info()[2]
80 elif self.protocol == 'bup':
81 self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 1982 is the default bup server port when none was given.
82 self.sock.connect((self.host, atoi(self.port) or 1982))
83 self.sockw = self.sock.makefile('wb')
84 self.conn = DemuxConn(self.sock.fileno(), self.sockw)
# Protocol commands are newline-terminated, so strip CR/LF from dir.
86 self.dir = re.sub(r'[\r\n]', ' ', self.dir)
# `create` presumably selects init-dir vs set-dir -- the conditional
# on line ~87 is not visible in this view.
88 self.conn.write('init-dir %s\n' % self.dir)
90 self.conn.write('set-dir %s\n' % self.dir)
# NOTE(review): the lines below are fragments of two or more methods
# (apparently close()/teardown and a check_ok()-style wrapper) whose
# `def` lines are not visible in this sampled view; comments are
# hedged accordingly.
# EPIPE here presumably means the server already went away -- a
# tolerable condition during shutdown; verify against missing context.
98 if e.errno == errno.EPIPE:
# Politely ask the server to quit, but only if no command is mid-flight.
104 if self.conn and not self._busy:
105 self.conn.write('quit\n')
# Half-close the socket so the server sees EOF on its input.
108 if self.sock and self.sockw:
110 self.sock.shutdown(socket.SHUT_WR)
121 raise ClientError('server tunnel returned exit code %d' % rv)
# Drop all transport references after teardown.
123 self.sock = self.p = self.pin = self.pout = None
129 raise ClientError('server exited unexpectedly with code %r'
# Delegate the protocol-level ok/error check to the connection.
132 return self.conn.check_ok()
# Python 2 three-argument raise: preserve the original traceback.
134 raise ClientError, e, sys.exc_info()[2]
# Raise if a streaming command is already in progress on this
# connection (the guard condition on line ~137 is not visible here).
136 def check_busy(self):
138 raise ClientError('already busy with command %r' % self._busy)
# Inverse of check_busy: assert that a streaming command IS in
# progress (the guard condition on line ~141 is not visible here).
140 def ensure_busy(self):
142 raise ClientError('expected to be busy, but not busy?!')
# Synchronize the local index cache with the server: list the server's
# pack indexes, delete cached .idx files the server no longer has,
# fetch the ones it asks us to load, then rebuild midx files.
# NOTE(review): many original lines (148-149, 152, 154, 156-157,
# 160-161, 164, 168, 170-172, 174, 177-178) are missing from this
# view -- the `extra`/`needed` set bookkeeping is only partly visible.
147 def sync_indexes(self):
150 mkdirp(self.cachedir)
151 # All cached idxs are extra until proven otherwise
153 for f in os.listdir(self.cachedir):
155 if f.endswith('.idx'):
158 conn.write('list-indexes\n')
159 for line in linereader(conn):
# Server-supplied names must be bare filenames, never paths.
162 assert(line.find('/') < 0)
163 parts = line.split(' ')
165 if len(parts) == 2 and parts[1] == 'load' and idx not in extra:
166 # If the server requests that we load an idx and we don't
167 # already have a copy of it, it is needed
169 # Any idx that the server has heard of is proven not extra
173 debug1('client: removing extra indexes: %s\n' % extra)
175 os.unlink(os.path.join(self.cachedir, idx))
176 debug1('client: server requested load of: %s\n' % needed)
# Rebuild .midx files over the refreshed cache directory.
179 git.auto_midx(self.cachedir)
# Download a single pack index `name` from the server into the local
# cache directory, writing to a .tmp file and renaming atomically.
# NOTE(review): original lines 183, 191, 193, 196-197, 200-201 and 203
# are missing from this view (including the loop that tracks `count`
# and the .tmp file's close()).
181 def sync_index(self, name):
182 #debug1('requesting %r\n' % name)
184 mkdirp(self.cachedir)
185 fn = os.path.join(self.cachedir, name)
# Refuse to re-download an index we already have; a corrupt copy
# should be investigated with `bup bloom --check` instead.
186 if os.path.exists(fn):
187 msg = "won't request existing .idx, try `bup bloom --check %s`" % fn
188 raise ClientError(msg)
189 self.conn.write('send-index %s\n' % name)
# Server prefixes the index payload with its byte length as a
# network-order unsigned 32-bit int.
190 n = struct.unpack('!I', self.conn.read(4))[0]
# Write to a temp file first so a partial download never looks like
# a valid .idx.
192 f = open(fn + '.tmp', 'w')
194 progress('Receiving index from server: %d/%d\r' % (count, n))
# Stream exactly n bytes from the connection in chunks.
195 for b in chunkyreader(self.conn, n):
198 qprogress('Receiving index from server: %d/%d\r' % (count, n))
199 progress('Receiving index from server: %d/%d, done.\n' % (count, n))
# Atomic rename publishes the completed index.
202 os.rename(fn + '.tmp', fn)
# Factory for the object cache used by PackWriter_Remote: a
# PackIdxList over all locally cached pack indexes.
204 def _make_objcache(self):
205 return git.PackIdxList(self.cachedir)
# During a suspended receive-objects-v2, read the server's "index ..."
# suggestions (indexes we should download so we can dedupe locally),
# sync each suggested index, rebuild midx, and resume the command.
# Returns the last idx name received (per the comment on line 312's
# caller -- TODO confirm, the return statement is not fully visible).
# NOTE(review): original lines 208-209, 212, 214-215, 218, 222,
# 227-230, 232, 234-235 are missing from this view.
207 def _suggest_packs(self):
210 assert(ob == 'receive-objects-v2')
# Four 0xff bytes is the protocol's suspend marker for
# receive-objects-v2.
211 self.conn.write('\xff\xff\xff\xff') # suspend receive-objects-v2
213 for line in linereader(self.conn):
216 debug2('%s\n' % line)
# Server suggests an index it thinks we're missing.
217 if line.startswith('index '):
219 debug1('client: received index suggestion: %s\n'
220 % git.shorten_hash(idx))
221 suggested.append(idx)
# Otherwise (presumably a pack-complete notice) the line itself
# names the finished pack's idx -- verify against missing lines.
223 assert(line.endswith('.idx'))
224 debug1('client: completed writing pack, idx: %s\n'
225 % git.shorten_hash(line))
226 suggested.append(line)
# Fetch every suggested index, then refresh midx files.
231 for idx in suggested:
233 git.auto_midx(self.cachedir)
# NOTE(review): line 236 below belongs to a *different* method (its
# `def` on line ~234 is not visible) -- presumably one that resumes
# the suspended command by re-sending its name.
236 self.conn.write('%s\n' % ob)
# Start a receive-objects-v2 command on the connection and return a
# PackWriter_Remote bound to it.  Marks the connection busy until the
# writer's onclose callback (_not_busy -- not visible here) fires.
# NOTE(review): original lines 240-241 and 247 are missing from this
# view (including the `onopen` argument, if any).
239 def new_packwriter(self):
242 self._busy = 'receive-objects-v2'
243 self.conn.write('receive-objects-v2\n')
244 return PackWriter_Remote(self.conn,
245 objcache_maker = self._make_objcache,
246 suggest_packs = self._suggest_packs,
248 onclose = self._not_busy,
249 ensure_busy = self.ensure_busy,
250 compression_level = self.compression_level)
# Ask the server for the value of ref `refname`; returns the 20-byte
# binary sha, or None if the ref does not exist.
# NOTE(review): original lines 253, 256-257 and 260 are missing
# (busy-check and the ok/empty-line handling around the branches).
252 def read_ref(self, refname):
254 self.conn.write('read-ref %s\n' % refname)
255 r = self.conn.readline().strip()
258 assert(len(r) == 40) # hexified sha
# Python 2 str.decode('hex'): 40 hex chars -> 20 raw bytes.
259 return r.decode('hex')
261 return None # nonexistent ref
# Atomically update ref `refname` from `oldval` to `newval` (both
# 20-byte binary shas; oldval may be None/empty for a new ref).
# NOTE(review): original lines 264 and 268+ (busy-check and check_ok)
# are not visible in this view.
263 def update_ref(self, refname, newval, oldval):
# Protocol carries shas as hex text, one field per line.
265 self.conn.write('update-ref %s\n%s\n%s\n'
266 % (refname, newval.encode('hex'),
267 (oldval or '').encode('hex')))
# NOTE(review): fragment of a cat()-style generator method whose `def`
# line is not visible in this sampled view.  It requests object `id`
# and yields its contents in size-prefixed chunks.
# Newlines in `id` would corrupt the line-oriented protocol, so they
# are replaced before sending.
273 self.conn.write('cat %s\n' % re.sub(r'[\n\r]', '_', id))
# Each chunk is prefixed by its length as a network-order uint32;
# presumably a zero length terminates the stream -- TODO confirm.
275 sz = struct.unpack('!I', self.conn.read(4))[0]
277 yield self.conn.read(sz)
# Unknown-object errors surface to callers as KeyError.
281 raise KeyError(str(e))
# A PackWriter that streams pack data over a Client connection instead
# of writing a local packfile.  The class continues past the end of
# this view.
284 class PackWriter_Remote(git.PackWriter):
# Wire up the remote connection plus the callbacks supplied by
# Client.new_packwriter (suggest_packs, onclose, ensure_busy).
# NOTE(review): original lines 286-287, 290, 293 and 297 are missing
# from this view (including the `onopen`/`onclose` parameters in the
# signature and the self.conn / self._bwcount assignments).
285 def __init__(self, conn, objcache_maker, suggest_packs,
288 compression_level=1):
289 git.PackWriter.__init__(self, objcache_maker)
# Cosmetic name used in place of a real packfile path.
291 self.filename = 'remote socket'
292 self.suggest_packs = suggest_packs
294 self.onclose = onclose
295 self.ensure_busy = ensure_busy
# True while a pack header has been sent and the pack is open.
296 self._packopen = False
# Bandwidth-limit accounting state for _raw_write_bwlimit.
298 self._bwtime = time.time()
299 self.compression_level = compression_level
# NOTE(review): the lines below are fragments of the _open()/_end()
# (and abort()) methods whose `def` lines are not visible in this
# sampled view; comments are hedged accordingly.
# Lazily open the remote pack on first use.
302 if not self._packopen:
304 self._packopen = True
# Closing: a zero-length chunk header (four NUL bytes) ends the pack
# stream on the wire.
307 if self._packopen and self.file:
308 self.file.write('\0\0\0\0')
309 self._packopen = False
310 self.onclose() # Unbusy
312 return self.suggest_packs() # Returns last idx received
# Remote pack writing cannot be rolled back once bytes are sent.
320 raise ClientError("don't know how to abort remote pack writing")
# Send one object to the remote pack: a length/sha/crc-framed chunk
# written through the bandwidth limiter.  Overrides
# git.PackWriter._raw_write.
# NOTE(review): original lines 323, 325-326, 328-329, 332, 334-335,
# 338 and 341+ are missing from this view (including the _open() call,
# the sha element of the frame, and the except clause pairing with
# the raise on line 339).
322 def _raw_write(self, datalist, sha):
324 if not self._packopen:
327 data = ''.join(datalist)
# Mask to keep crc32 unsigned on Python 2 (zlib.crc32 may return a
# signed int there).
330 crc = zlib.crc32(data) & 0xffffffff
# Frame layout (partially visible): uint32 total length
# (payload + 20-byte sha + 4-byte crc), then sha, then crc, then
# the payload itself -- the sha element is on a missing line.
331 outbuf = ''.join((struct.pack('!I', len(data) + 20 + 4),
333 struct.pack('!I', crc),
# Throttled write; thread the bwlimit accounting state through.
336 (self._bwcount, self._bwtime) = _raw_write_bwlimit(
337 self.file, outbuf, self._bwcount, self._bwtime)
# Python 2 three-argument raise: preserve the original traceback.
339 raise ClientError, e, sys.exc_info()[2]
340 self.outbytes += len(data)
# NOTE(review): fragment of a method whose `def` line is not visible
# in this sampled view.  If the server has sent data (presumably
# index suggestions), refresh the local object cache so dedupe sees
# the newly fetched indexes -- verify against the missing context.
343 if self.file.has_input():
345 self.objcache.refresh()