# bup client library - lib/bup/client.py
# (bup.git change: "Make zlib compression level a parameter for Client")
1 import re, struct, errno, time, zlib
2 from bup import git, ssh
3 from bup.helpers import *
4
5 bwlimit = None
6
7
8 class ClientError(Exception):
9     pass
10
11
def _raw_write_bwlimit(f, buf, bwcount, bwtime):
    """Write buf to f, paced by the module-global bwlimit (bytes/sec).

    Returns an updated (bwcount, bwtime) pair; the caller must feed it
    back in on the next call so the pacing stays continuous.  When
    bwlimit is unset/falsy, the whole buffer goes out in one write.
    """
    if not bwlimit:
        f.write(buf)
        return (len(buf), time.time())
    # We want to write in reasonably large blocks, but not so large that
    # they're likely to overflow a router's queue.  So our bwlimit timing
    # has to be pretty granular.  Also, if it takes too long from one
    # transmit to the next, we can't just make up for lost time to bring
    # the average back up to bwlimit - that will risk overflowing the
    # outbound queue, which defeats the purpose.  So if we fall behind
    # by more than one block delay, we shouldn't ever try to catch up.
    pos = 0
    total = len(buf)
    while pos < total:
        now = time.time()
        # Earliest moment we're allowed to send the next chunk.
        earliest = max(now, bwtime + 1.0*bwcount/bwlimit)
        time.sleep(earliest - now)
        chunk = buf[pos:pos+4096]
        f.write(chunk)
        bwcount = len(chunk)  # might be less than 4096 on the tail
        bwtime = earliest
        pos += 4096
    return (bwcount, bwtime)
33
34
def parse_remote(remote):
    """Split a remote spec into a (protocol, host, port, path) tuple.

    Understands URL forms such as ssh://host:port/path, bup://host,
    and file:///path, plus the rsync-style 'host:path' shorthand.
    A bare path (no colon, or an empty/'-' first component) is local.
    """
    protocol = r'([a-z]+)://'
    # The (?P<sb>...) group allows an IPv6 literal wrapped in brackets.
    host = r'(?P<sb>\[)?((?(sb)[0-9a-f:]+|[^:/]+))(?(sb)\])'
    port = r'(?::(\d+))?'
    path = r'(/.*)?'
    m = re.match(
            '%s(?:%s%s)?%s' % (protocol, host, port, path), remote, re.I)
    if not m:
        # Not a URL: treat 'host:path' as ssh, anything else as local.
        pieces = remote.split(':', 1)
        if len(pieces) == 1 or pieces[0] in ('', '-'):
            return 'file', None, None, pieces[-1]
        return 'ssh', pieces[0], None, pieces[1]
    assert(m.group(1) in ('ssh', 'bup', 'file'))
    return m.group(1, 3, 4, 5)
51
52
53 class Client:
54     def __init__(self, remote, create=False, compression_level=1):
55         self._busy = self.conn = None
56         self.sock = self.p = self.pout = self.pin = None
57         self.compression_level = compression_level
58         is_reverse = os.environ.get('BUP_SERVER_REVERSE')
59         if is_reverse:
60             assert(not remote)
61             remote = '%s:' % is_reverse
62         (self.protocol, self.host, self.port, self.dir) = parse_remote(remote)
63         self.cachedir = git.repo('index-cache/%s'
64                                  % re.sub(r'[^@\w]', '_', 
65                                           "%s:%s" % (self.host, self.dir)))
66         if is_reverse:
67             self.pout = os.fdopen(3, 'rb')
68             self.pin = os.fdopen(4, 'wb')
69             self.conn = Conn(self.pout, self.pin)
70         else:
71             if self.protocol in ('ssh', 'file'):
72                 try:
73                     # FIXME: ssh and file shouldn't use the same module
74                     self.p = ssh.connect(self.host, self.port, 'server')
75                     self.pout = self.p.stdout
76                     self.pin = self.p.stdin
77                     self.conn = Conn(self.pout, self.pin)
78                 except OSError, e:
79                     raise ClientError, 'connect: %s' % e, sys.exc_info()[2]
80             elif self.protocol == 'bup':
81                 self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
82                 self.sock.connect((self.host, atoi(self.port) or 1982))
83                 self.sockw = self.sock.makefile('wb')
84                 self.conn = DemuxConn(self.sock.fileno(), self.sockw)
85         if self.dir:
86             self.dir = re.sub(r'[\r\n]', ' ', self.dir)
87             if create:
88                 self.conn.write('init-dir %s\n' % self.dir)
89             else:
90                 self.conn.write('set-dir %s\n' % self.dir)
91             self.check_ok()
92         self.sync_indexes()
93
94     def __del__(self):
95         try:
96             self.close()
97         except IOError, e:
98             if e.errno == errno.EPIPE:
99                 pass
100             else:
101                 raise
102
103     def close(self):
104         if self.conn and not self._busy:
105             self.conn.write('quit\n')
106         if self.pin:
107             self.pin.close()
108         if self.sock and self.sockw:
109             self.sockw.close()
110             self.sock.shutdown(socket.SHUT_WR)
111         if self.conn:
112             self.conn.close()
113         if self.pout:
114             self.pout.close()
115         if self.sock:
116             self.sock.close()
117         if self.p:
118             self.p.wait()
119             rv = self.p.wait()
120             if rv:
121                 raise ClientError('server tunnel returned exit code %d' % rv)
122         self.conn = None
123         self.sock = self.p = self.pin = self.pout = None
124
125     def check_ok(self):
126         if self.p:
127             rv = self.p.poll()
128             if rv != None:
129                 raise ClientError('server exited unexpectedly with code %r'
130                                   % rv)
131         try:
132             return self.conn.check_ok()
133         except Exception, e:
134             raise ClientError, e, sys.exc_info()[2]
135
136     def check_busy(self):
137         if self._busy:
138             raise ClientError('already busy with command %r' % self._busy)
139         
140     def ensure_busy(self):
141         if not self._busy:
142             raise ClientError('expected to be busy, but not busy?!')
143         
144     def _not_busy(self):
145         self._busy = None
146
147     def sync_indexes(self):
148         self.check_busy()
149         conn = self.conn
150         mkdirp(self.cachedir)
151         # All cached idxs are extra until proven otherwise
152         extra = set()
153         for f in os.listdir(self.cachedir):
154             debug1('%s\n' % f)
155             if f.endswith('.idx'):
156                 extra.add(f)
157         needed = set()
158         conn.write('list-indexes\n')
159         for line in linereader(conn):
160             if not line:
161                 break
162             assert(line.find('/') < 0)
163             parts = line.split(' ')
164             idx = parts[0]
165             if len(parts) == 2 and parts[1] == 'load' and idx not in extra:
166                 # If the server requests that we load an idx and we don't
167                 # already have a copy of it, it is needed
168                 needed.add(idx)
169             # Any idx that the server has heard of is proven not extra
170             extra.discard(idx)
171
172         self.check_ok()
173         debug1('client: removing extra indexes: %s\n' % extra)
174         for idx in extra:
175             os.unlink(os.path.join(self.cachedir, idx))
176         debug1('client: server requested load of: %s\n' % needed)
177         for idx in needed:
178             self.sync_index(idx)
179         git.auto_midx(self.cachedir)
180
181     def sync_index(self, name):
182         #debug1('requesting %r\n' % name)
183         self.check_busy()
184         mkdirp(self.cachedir)
185         fn = os.path.join(self.cachedir, name)
186         if os.path.exists(fn):
187             msg = "won't request existing .idx, try `bup bloom --check %s`" % fn
188             raise ClientError(msg)
189         self.conn.write('send-index %s\n' % name)
190         n = struct.unpack('!I', self.conn.read(4))[0]
191         assert(n)
192         f = open(fn + '.tmp', 'w')
193         count = 0
194         progress('Receiving index from server: %d/%d\r' % (count, n))
195         for b in chunkyreader(self.conn, n):
196             f.write(b)
197             count += len(b)
198             qprogress('Receiving index from server: %d/%d\r' % (count, n))
199         progress('Receiving index from server: %d/%d, done.\n' % (count, n))
200         self.check_ok()
201         f.close()
202         os.rename(fn + '.tmp', fn)
203
204     def _make_objcache(self):
205         return git.PackIdxList(self.cachedir)
206
207     def _suggest_packs(self):
208         ob = self._busy
209         if ob:
210             assert(ob == 'receive-objects-v2')
211             self.conn.write('\xff\xff\xff\xff')  # suspend receive-objects-v2
212         suggested = []
213         for line in linereader(self.conn):
214             if not line:
215                 break
216             debug2('%s\n' % line)
217             if line.startswith('index '):
218                 idx = line[6:]
219                 debug1('client: received index suggestion: %s\n'
220                        % git.shorten_hash(idx))
221                 suggested.append(idx)
222             else:
223                 assert(line.endswith('.idx'))
224                 debug1('client: completed writing pack, idx: %s\n'
225                        % git.shorten_hash(line))
226                 suggested.append(line)
227         self.check_ok()
228         if ob:
229             self._busy = None
230         idx = None
231         for idx in suggested:
232             self.sync_index(idx)
233         git.auto_midx(self.cachedir)
234         if ob:
235             self._busy = ob
236             self.conn.write('%s\n' % ob)
237         return idx
238
239     def new_packwriter(self):
240         self.check_busy()
241         def _set_busy():
242             self._busy = 'receive-objects-v2'
243             self.conn.write('receive-objects-v2\n')
244         return PackWriter_Remote(self.conn,
245                                  objcache_maker = self._make_objcache,
246                                  suggest_packs = self._suggest_packs,
247                                  onopen = _set_busy,
248                                  onclose = self._not_busy,
249                                  ensure_busy = self.ensure_busy,
250                                  compression_level = self.compression_level)
251
252     def read_ref(self, refname):
253         self.check_busy()
254         self.conn.write('read-ref %s\n' % refname)
255         r = self.conn.readline().strip()
256         self.check_ok()
257         if r:
258             assert(len(r) == 40)   # hexified sha
259             return r.decode('hex')
260         else:
261             return None   # nonexistent ref
262
263     def update_ref(self, refname, newval, oldval):
264         self.check_busy()
265         self.conn.write('update-ref %s\n%s\n%s\n' 
266                         % (refname, newval.encode('hex'),
267                            (oldval or '').encode('hex')))
268         self.check_ok()
269
270     def cat(self, id):
271         self.check_busy()
272         self._busy = 'cat'
273         self.conn.write('cat %s\n' % re.sub(r'[\n\r]', '_', id))
274         while 1:
275             sz = struct.unpack('!I', self.conn.read(4))[0]
276             if not sz: break
277             yield self.conn.read(sz)
278         e = self.check_ok()
279         self._not_busy()
280         if e:
281             raise KeyError(str(e))
282
283
284 class PackWriter_Remote(git.PackWriter):
285     def __init__(self, conn, objcache_maker, suggest_packs,
286                  onopen, onclose,
287                  ensure_busy,
288                  compression_level=1):
289         git.PackWriter.__init__(self, objcache_maker)
290         self.file = conn
291         self.filename = 'remote socket'
292         self.suggest_packs = suggest_packs
293         self.onopen = onopen
294         self.onclose = onclose
295         self.ensure_busy = ensure_busy
296         self._packopen = False
297         self._bwcount = 0
298         self._bwtime = time.time()
299         self.compression_level = compression_level
300
301     def _open(self):
302         if not self._packopen:
303             self.onopen()
304             self._packopen = True
305
306     def _end(self):
307         if self._packopen and self.file:
308             self.file.write('\0\0\0\0')
309             self._packopen = False
310             self.onclose() # Unbusy
311             self.objcache = None
312             return self.suggest_packs() # Returns last idx received
313
314     def close(self):
315         id = self._end()
316         self.file = None
317         return id
318
319     def abort(self):
320         raise ClientError("don't know how to abort remote pack writing")
321
322     def _raw_write(self, datalist, sha):
323         assert(self.file)
324         if not self._packopen:
325             self._open()
326         self.ensure_busy()
327         data = ''.join(datalist)
328         assert(data)
329         assert(sha)
330         crc = zlib.crc32(data) & 0xffffffff
331         outbuf = ''.join((struct.pack('!I', len(data) + 20 + 4),
332                           sha,
333                           struct.pack('!I', crc),
334                           data))
335         try:
336             (self._bwcount, self._bwtime) = _raw_write_bwlimit(
337                     self.file, outbuf, self._bwcount, self._bwtime)
338         except IOError, e:
339             raise ClientError, e, sys.exc_info()[2]
340         self.outbytes += len(data)
341         self.count += 1
342
343         if self.file.has_input():
344             self.suggest_packs()
345             self.objcache.refresh()
346
347         return sha, crc