Turn capabilities into a mutable set, instead of a fixed tuple.
Bryan O'Sullivan
r5258:b534c502 default
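This changeset replaces the two fixed capability containers with mutable sets: the HTTP client's cached `caps` in httprepo.py and the class-level `capabilities` tuple in localrepo.py. A minimal sketch of why that matters, using the built-in `set` (the patch itself uses `util.set`, Mercurial's compatibility alias for older Pythons) and illustrative values only:

```python
# Before: an immutable tuple; adding a capability means rebuilding it.
caps = ('lookup', 'changegroupsubset')
caps = caps + ('unbundle',)      # copy-and-replace, easy to get wrong

# After: a mutable set; extensions and wire code can update it in place.
caps = set(('lookup', 'changegroupsubset'))
caps.add('unbundle')             # in-place registration
assert 'lookup' in caps          # membership test is O(1)
```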
@@ -1,454 +1,454 @@
1 1 # httprepo.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import *
10 10 from remoterepo import *
11 11 from i18n import _
12 12 import repo, os, urllib, urllib2, urlparse, zlib, util, httplib
13 13 import errno, keepalive, tempfile, socket, changegroup
14 14
15 15 class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
16 16 def __init__(self, ui):
17 17 urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
18 18 self.ui = ui
19 19
20 20 def find_user_password(self, realm, authuri):
21 21 authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
22 22 self, realm, authuri)
23 23 user, passwd = authinfo
24 24 if user and passwd:
25 25 return (user, passwd)
26 26
27 27 if not self.ui.interactive:
28 28 raise util.Abort(_('http authorization required'))
29 29
30 30 self.ui.write(_("http authorization required\n"))
31 31 self.ui.status(_("realm: %s\n") % realm)
32 32 if user:
33 33 self.ui.status(_("user: %s\n") % user)
34 34 else:
35 35 user = self.ui.prompt(_("user:"), default=None)
36 36
37 37 if not passwd:
38 38 passwd = self.ui.getpass()
39 39
40 40 self.add_password(realm, authuri, user, passwd)
41 41 return (user, passwd)
42 42
43 43 def netlocsplit(netloc):
44 44 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
45 45
46 46 a = netloc.find('@')
47 47 if a == -1:
48 48 user, passwd = None, None
49 49 else:
50 50 userpass, netloc = netloc[:a], netloc[a+1:]
51 51 c = userpass.find(':')
52 52 if c == -1:
53 53 user, passwd = urllib.unquote(userpass), None
54 54 else:
55 55 user = urllib.unquote(userpass[:c])
56 56 passwd = urllib.unquote(userpass[c+1:])
57 57 c = netloc.find(':')
58 58 if c == -1:
59 59 host, port = netloc, None
60 60 else:
61 61 host, port = netloc[:c], netloc[c+1:]
62 62 return host, port, user, passwd
63 63
64 64 def netlocunsplit(host, port, user=None, passwd=None):
65 65 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
66 66 if port:
67 67 hostport = host + ':' + port
68 68 else:
69 69 hostport = host
70 70 if user:
71 71 if passwd:
72 72 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
73 73 else:
74 74 userpass = urllib.quote(user)
75 75 return userpass + '@' + hostport
76 76 return hostport
77 77
78 78 # work around a bug in Python < 2.4.2
79 79 # (it leaves a "\n" at the end of Proxy-authorization headers)
80 80 class request(urllib2.Request):
81 81 def add_header(self, key, val):
82 82 if key.lower() == 'proxy-authorization':
83 83 val = val.strip()
84 84 return urllib2.Request.add_header(self, key, val)
85 85
86 86 class httpsendfile(file):
87 87 def __len__(self):
88 88 return os.fstat(self.fileno()).st_size
89 89
90 90 def _gen_sendfile(connection):
91 91 def _sendfile(self, data):
92 92 # send a file
93 93 if isinstance(data, httpsendfile):
94 94 # if auth required, some data sent twice, so rewind here
95 95 data.seek(0)
96 96 for chunk in util.filechunkiter(data):
97 97 connection.send(self, chunk)
98 98 else:
99 99 connection.send(self, data)
100 100 return _sendfile
101 101
102 102 class httpconnection(keepalive.HTTPConnection):
103 103 # must be able to send big bundle as stream.
104 104 send = _gen_sendfile(keepalive.HTTPConnection)
105 105
106 106 class basehttphandler(keepalive.HTTPHandler):
107 107 def http_open(self, req):
108 108 return self.do_open(httpconnection, req)
109 109
110 110 has_https = hasattr(urllib2, 'HTTPSHandler')
111 111 if has_https:
112 112 class httpsconnection(httplib.HTTPSConnection):
113 113 response_class = keepalive.HTTPResponse
114 114 # must be able to send big bundle as stream.
115 115 send = _gen_sendfile(httplib.HTTPSConnection)
116 116
117 117 class httphandler(basehttphandler, urllib2.HTTPSHandler):
118 118 def https_open(self, req):
119 119 return self.do_open(httpsconnection, req)
120 120 else:
121 121 class httphandler(basehttphandler):
122 122 pass
123 123
124 124 # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
125 125 # it doesn't know about the auth type requested. This can happen if
126 126 # somebody is using BasicAuth and types a bad password.
127 127 class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
128 128 def http_error_auth_reqed(self, auth_header, host, req, headers):
129 129 try:
130 130 return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
131 131 self, auth_header, host, req, headers)
132 132 except ValueError, inst:
133 133 arg = inst.args[0]
134 134 if arg.startswith("AbstractDigestAuthHandler doesn't know "):
135 135 return
136 136 raise
137 137
138 138 def zgenerator(f):
139 139 zd = zlib.decompressobj()
140 140 try:
141 141 for chunk in util.filechunkiter(f):
142 142 yield zd.decompress(chunk)
143 143 except httplib.HTTPException, inst:
144 144 raise IOError(None, _('connection ended unexpectedly'))
145 145 yield zd.flush()
146 146
147 147 _safe = ('abcdefghijklmnopqrstuvwxyz'
148 148 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
149 149 '0123456789' '_.-/')
150 150 _safeset = None
151 151 _hex = None
152 152 def quotepath(path):
153 153 '''quote the path part of a URL
154 154
155 155 This is similar to urllib.quote, but it also tries to avoid
156 156 quoting things twice (inspired by wget):
157 157
158 158 >>> quotepath('abc def')
159 159 'abc%20def'
160 160 >>> quotepath('abc%20def')
161 161 'abc%20def'
162 162 >>> quotepath('abc%20 def')
163 163 'abc%20%20def'
164 164 >>> quotepath('abc def%20')
165 165 'abc%20def%20'
166 166 >>> quotepath('abc def%2')
167 167 'abc%20def%252'
168 168 >>> quotepath('abc def%')
169 169 'abc%20def%25'
170 170 '''
171 171 global _safeset, _hex
172 172 if _safeset is None:
173 173 _safeset = util.set(_safe)
174 174 _hex = util.set('abcdefABCDEF0123456789')
175 175 l = list(path)
176 176 for i in xrange(len(l)):
177 177 c = l[i]
178 178 if c == '%' and i + 2 < len(l) and (l[i+1] in _hex and l[i+2] in _hex):
179 179 pass
180 180 elif c not in _safeset:
181 181 l[i] = '%%%02X' % ord(c)
182 182 return ''.join(l)
183 183
184 184 class httprepository(remoterepository):
185 185 def __init__(self, ui, path):
186 186 self.path = path
187 187 self.caps = None
188 188 self.handler = None
189 189 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
190 190 if query or frag:
191 191 raise util.Abort(_('unsupported URL component: "%s"') %
192 192 (query or frag))
193 193 if not urlpath:
194 194 urlpath = '/'
195 195 urlpath = quotepath(urlpath)
196 196 host, port, user, passwd = netlocsplit(netloc)
197 197
198 198 # urllib cannot handle URLs with embedded user or passwd
199 199 self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
200 200 urlpath, '', ''))
201 201 self.ui = ui
202 202 self.ui.debug(_('using %s\n') % self._url)
203 203
204 204 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
205 205 # XXX proxyauthinfo = None
206 206 self.handler = httphandler()
207 207 handlers = [self.handler]
208 208
209 209 if proxyurl:
210 210 # proxy can be proper url or host[:port]
211 211 if not (proxyurl.startswith('http:') or
212 212 proxyurl.startswith('https:')):
213 213 proxyurl = 'http://' + proxyurl + '/'
214 214 snpqf = urlparse.urlsplit(proxyurl)
215 215 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
216 216 hpup = netlocsplit(proxynetloc)
217 217
218 218 proxyhost, proxyport, proxyuser, proxypasswd = hpup
219 219 if not proxyuser:
220 220 proxyuser = ui.config("http_proxy", "user")
221 221 proxypasswd = ui.config("http_proxy", "passwd")
222 222
223 223 # see if we should use a proxy for this url
224 224 no_list = [ "localhost", "127.0.0.1" ]
225 225 no_list.extend([p.lower() for
226 226 p in ui.configlist("http_proxy", "no")])
227 227 no_list.extend([p.strip().lower() for
228 228 p in os.getenv("no_proxy", '').split(',')
229 229 if p.strip()])
230 230 # "http_proxy.always" config is for running tests on localhost
231 231 if (not ui.configbool("http_proxy", "always") and
232 232 host.lower() in no_list):
233 233 ui.debug(_('disabling proxy for %s\n') % host)
234 234 else:
235 235 proxyurl = urlparse.urlunsplit((
236 236 proxyscheme, netlocunsplit(proxyhost, proxyport,
237 237 proxyuser, proxypasswd or ''),
238 238 proxypath, proxyquery, proxyfrag))
239 239 handlers.append(urllib2.ProxyHandler({scheme: proxyurl}))
240 240 ui.debug(_('proxying through http://%s:%s\n') %
241 241 (proxyhost, proxyport))
242 242
243 243 # urllib2 takes proxy values from the environment and those
244 244 # will take precedence if found, so drop them
245 245 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
246 246 try:
247 247 if os.environ.has_key(env):
248 248 del os.environ[env]
249 249 except OSError:
250 250 pass
251 251
252 252 passmgr = passwordmgr(ui)
253 253 if user:
254 254 ui.debug(_('http auth: user %s, password %s\n') %
255 255 (user, passwd and '*' * len(passwd) or 'not set'))
256 256 passmgr.add_password(None, self._url, user, passwd or '')
257 257
258 258 handlers.extend((urllib2.HTTPBasicAuthHandler(passmgr),
259 259 httpdigestauthhandler(passmgr)))
260 260 opener = urllib2.build_opener(*handlers)
261 261
262 262 # 1.0 here is the _protocol_ version
263 263 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
264 264 urllib2.install_opener(opener)
265 265
266 266 def __del__(self):
267 267 if self.handler:
268 268 self.handler.close_all()
269 269 self.handler = None
270 270
271 271 def url(self):
272 272 return self.path
273 273
274 274 # look up capabilities only when needed
275 275
276 276 def get_caps(self):
277 277 if self.caps is None:
278 278 try:
279 -  self.caps = self.do_read('capabilities').split()
279 +  self.caps = util.set(self.do_read('capabilities').split())
280 280 except repo.RepoError:
281 -  self.caps = ()
281 +  self.caps = util.set()
282 282 self.ui.debug(_('capabilities: %s\n') %
283 283 (' '.join(self.caps or ['none'])))
284 284 return self.caps
285 285
286 286 capabilities = property(get_caps)
287 287
288 288 def lock(self):
289 289 raise util.Abort(_('operation not supported over http'))
290 290
291 291 def do_cmd(self, cmd, **args):
292 292 data = args.pop('data', None)
293 293 headers = args.pop('headers', {})
294 294 self.ui.debug(_("sending %s command\n") % cmd)
295 295 q = {"cmd": cmd}
296 296 q.update(args)
297 297 qs = '?%s' % urllib.urlencode(q)
298 298 cu = "%s%s" % (self._url, qs)
299 299 try:
300 300 if data:
301 301 self.ui.debug(_("sending %s bytes\n") %
302 302 headers.get('content-length', 'X'))
303 303 resp = urllib2.urlopen(request(cu, data, headers))
304 304 except urllib2.HTTPError, inst:
305 305 if inst.code == 401:
306 306 raise util.Abort(_('authorization failed'))
307 307 raise
308 308 except httplib.HTTPException, inst:
309 309 self.ui.debug(_('http error while sending %s command\n') % cmd)
310 310 self.ui.print_exc()
311 311 raise IOError(None, inst)
312 312 except IndexError:
313 313 # this only happens with Python 2.3, later versions raise URLError
314 314 raise util.Abort(_('http error, possibly caused by proxy setting'))
315 315 # record the url we got redirected to
316 316 resp_url = resp.geturl()
317 317 if resp_url.endswith(qs):
318 318 resp_url = resp_url[:-len(qs)]
319 319 if self._url != resp_url:
320 320 self.ui.status(_('real URL is %s\n') % resp_url)
321 321 self._url = resp_url
322 322 try:
323 323 proto = resp.getheader('content-type')
324 324 except AttributeError:
325 325 proto = resp.headers['content-type']
326 326
327 327 # accept old "text/plain" and "application/hg-changegroup" for now
328 328 if not (proto.startswith('application/mercurial-') or
329 329 proto.startswith('text/plain') or
330 330 proto.startswith('application/hg-changegroup')):
331 331 self.ui.debug(_("Requested URL: '%s'\n") % cu)
332 332 raise repo.RepoError(_("'%s' does not appear to be an hg repository")
333 333 % self._url)
334 334
335 335 if proto.startswith('application/mercurial-'):
336 336 try:
337 337 version = proto.split('-', 1)[1]
338 338 version_info = tuple([int(n) for n in version.split('.')])
339 339 except ValueError:
340 340 raise repo.RepoError(_("'%s' sent a broken Content-type "
341 341 "header (%s)") % (self._url, proto))
342 342 if version_info > (0, 1):
343 343 raise repo.RepoError(_("'%s' uses newer protocol %s") %
344 344 (self._url, version))
345 345
346 346 return resp
347 347
348 348 def do_read(self, cmd, **args):
349 349 fp = self.do_cmd(cmd, **args)
350 350 try:
351 351 return fp.read()
352 352 finally:
353 353 # if using keepalive, allow connection to be reused
354 354 fp.close()
355 355
356 356 def lookup(self, key):
357 357 d = self.do_cmd("lookup", key = key).read()
358 358 success, data = d[:-1].split(' ', 1)
359 359 if int(success):
360 360 return bin(data)
361 361 raise repo.RepoError(data)
362 362
363 363 def heads(self):
364 364 d = self.do_read("heads")
365 365 try:
366 366 return map(bin, d[:-1].split(" "))
367 367 except:
368 368 raise util.UnexpectedOutput(_("unexpected response:"), d)
369 369
370 370 def branches(self, nodes):
371 371 n = " ".join(map(hex, nodes))
372 372 d = self.do_read("branches", nodes=n)
373 373 try:
374 374 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
375 375 return br
376 376 except:
377 377 raise util.UnexpectedOutput(_("unexpected response:"), d)
378 378
379 379 def between(self, pairs):
380 380 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
381 381 d = self.do_read("between", pairs=n)
382 382 try:
383 383 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
384 384 return p
385 385 except:
386 386 raise util.UnexpectedOutput(_("unexpected response:"), d)
387 387
388 388 def changegroup(self, nodes, kind):
389 389 n = " ".join(map(hex, nodes))
390 390 f = self.do_cmd("changegroup", roots=n)
391 391 return util.chunkbuffer(zgenerator(f))
392 392
393 393 def changegroupsubset(self, bases, heads, source):
394 394 baselst = " ".join([hex(n) for n in bases])
395 395 headlst = " ".join([hex(n) for n in heads])
396 396 f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
397 397 return util.chunkbuffer(zgenerator(f))
398 398
399 399 def unbundle(self, cg, heads, source):
400 400 # have to stream bundle to a temp file because we do not have
401 401 # http 1.1 chunked transfer.
402 402
403 403 type = ""
404 404 types = self.capable('unbundle')
405 405 # servers older than d1b16a746db6 will send 'unbundle' as a
406 406 # boolean capability
407 407 try:
408 408 types = types.split(',')
409 409 except AttributeError:
410 410 types = [""]
411 411 if types:
412 412 for x in types:
413 413 if x in changegroup.bundletypes:
414 414 type = x
415 415 break
416 416
417 417 tempname = changegroup.writebundle(cg, None, type)
418 418 fp = httpsendfile(tempname, "rb")
419 419 try:
420 420 try:
421 421 rfp = self.do_cmd(
422 422 'unbundle', data=fp,
423 423 headers={'content-type': 'application/octet-stream'},
424 424 heads=' '.join(map(hex, heads)))
425 425 try:
426 426 ret = int(rfp.readline())
427 427 self.ui.write(rfp.read())
428 428 return ret
429 429 finally:
430 430 rfp.close()
431 431 except socket.error, err:
432 432 if err[0] in (errno.ECONNRESET, errno.EPIPE):
433 433 raise util.Abort(_('push failed: %s') % err[1])
434 434 raise util.Abort(err[1])
435 435 finally:
436 436 fp.close()
437 437 os.unlink(tempname)
438 438
439 439 def stream_out(self):
440 440 return self.do_cmd('stream_out')
441 441
442 442 class httpsrepository(httprepository):
443 443 def __init__(self, ui, path):
444 444 if not has_https:
445 445 raise util.Abort(_('Python support for SSL and HTTPS '
446 446 'is not installed'))
447 447 httprepository.__init__(self, ui, path)
448 448
449 449 def instance(ui, path, create):
450 450 if create:
451 451 raise util.Abort(_('cannot create new http repository'))
452 452 if path.startswith('https:'):
453 453 return httpsrepository(ui, path)
454 454 return httprepository(ui, path)
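With both repository classes now exposing a set-valued `capabilities`, membership checks and value-carrying capabilities (such as the comma-separated 'unbundle' value that the `unbundle` method above splits) work the same way against either class. A hedged sketch of the kind of lookup a caller performs; the `capable` helper and `FakeRepo` stand-in below are illustrative, not part of this diff:

```python
class FakeRepo(object):
    # Illustrative stand-in exposing the new set-valued property.
    capabilities = set(['lookup', 'unbundle=HG10GZ,HG10BZ,HG10UN'])

def capable(repo, name):
    # Scan the advertised capabilities; entries are either bare names
    # or 'name=value' pairs, as the unbundle() code above expects.
    for cap in repo.capabilities:
        if cap == name:
            return True
        if cap.startswith(name + '='):
            return cap.split('=', 1)[1]
    return False

# Usage sketch: a boolean capability returns True, a valued one returns
# its string, mirroring the try/except fallback in unbundle() above.
assert capable(FakeRepo(), 'lookup') is True
assert capable(FakeRepo(), 'unbundle') == 'HG10GZ,HG10BZ,HG10UN'
```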
@@ -1,1991 +1,1991 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
16 -  capabilities = ('lookup', 'changegroupsubset')
16 +  capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.path = path
22 22 self.root = os.path.realpath(path)
23 23 self.path = os.path.join(self.root, ".hg")
24 24 self.origroot = path
25 25 self.opener = util.opener(self.path)
26 26 self.wopener = util.opener(self.root)
27 27
28 28 if not os.path.isdir(self.path):
29 29 if create:
30 30 if not os.path.exists(path):
31 31 os.mkdir(path)
32 32 os.mkdir(self.path)
33 33 requirements = ["revlogv1"]
34 34 if parentui.configbool('format', 'usestore', True):
35 35 os.mkdir(os.path.join(self.path, "store"))
36 36 requirements.append("store")
37 37 # create an invalid changelog
38 38 self.opener("00changelog.i", "a").write(
39 39 '\0\0\0\2' # represents revlogv2
40 40 ' dummy changelog to prevent using the old repo layout'
41 41 )
42 42 reqfile = self.opener("requires", "w")
43 43 for r in requirements:
44 44 reqfile.write("%s\n" % r)
45 45 reqfile.close()
46 46 else:
47 47 raise repo.RepoError(_("repository %s not found") % path)
48 48 elif create:
49 49 raise repo.RepoError(_("repository %s already exists") % path)
50 50 else:
51 51 # find requirements
52 52 try:
53 53 requirements = self.opener("requires").read().splitlines()
54 54 except IOError, inst:
55 55 if inst.errno != errno.ENOENT:
56 56 raise
57 57 requirements = []
58 58 # check them
59 59 for r in requirements:
60 60 if r not in self.supported:
61 61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62 62
63 63 # setup store
64 64 if "store" in requirements:
65 65 self.encodefn = util.encodefilename
66 66 self.decodefn = util.decodefilename
67 67 self.spath = os.path.join(self.path, "store")
68 68 else:
69 69 self.encodefn = lambda x: x
70 70 self.decodefn = lambda x: x
71 71 self.spath = self.path
72 72 self.sopener = util.encodedopener(util.opener(self.spath),
73 73 self.encodefn)
74 74
75 75 self.ui = ui.ui(parentui=parentui)
76 76 try:
77 77 self.ui.readconfig(self.join("hgrc"), self.root)
78 78 extensions.loadall(self.ui)
79 79 except IOError:
80 80 pass
81 81
82 82 self.tagscache = None
83 83 self.branchcache = None
84 84 self.nodetagscache = None
85 85 self.filterpats = {}
86 86 self._transref = self._lockref = self._wlockref = None
87 87
88 88 def __getattr__(self, name):
89 89 if name == 'changelog':
90 90 self.changelog = changelog.changelog(self.sopener)
91 91 self.sopener.defversion = self.changelog.version
92 92 return self.changelog
93 93 if name == 'manifest':
94 94 self.changelog
95 95 self.manifest = manifest.manifest(self.sopener)
96 96 return self.manifest
97 97 if name == 'dirstate':
98 98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 99 return self.dirstate
100 100 else:
101 101 raise AttributeError, name
102 102
103 103 def url(self):
104 104 return 'file:' + self.root
105 105
106 106 def hook(self, name, throw=False, **args):
107 107 return hook.hook(self.ui, self, name, throw, **args)
108 108
109 109 tag_disallowed = ':\r\n'
110 110
111 111 def _tag(self, name, node, message, local, user, date, parent=None,
112 112 extra={}):
113 113 use_dirstate = parent is None
114 114
115 115 for c in self.tag_disallowed:
116 116 if c in name:
117 117 raise util.Abort(_('%r cannot be used in a tag name') % c)
118 118
119 119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120 120
121 121 def writetag(fp, name, munge, prevtags):
122 122 if prevtags and prevtags[-1] != '\n':
123 123 fp.write('\n')
124 124 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
125 125 fp.close()
126 126 self.hook('tag', node=hex(node), tag=name, local=local)
127 127
128 128 prevtags = ''
129 129 if local:
130 130 try:
131 131 fp = self.opener('localtags', 'r+')
132 132 except IOError, err:
133 133 fp = self.opener('localtags', 'a')
134 134 else:
135 135 prevtags = fp.read()
136 136
137 137 # local tags are stored in the current charset
138 138 writetag(fp, name, None, prevtags)
139 139 return
140 140
141 141 if use_dirstate:
142 142 try:
143 143 fp = self.wfile('.hgtags', 'rb+')
144 144 except IOError, err:
145 145 fp = self.wfile('.hgtags', 'ab')
146 146 else:
147 147 prevtags = fp.read()
148 148 else:
149 149 try:
150 150 prevtags = self.filectx('.hgtags', parent).data()
151 151 except revlog.LookupError:
152 152 pass
153 153 fp = self.wfile('.hgtags', 'wb')
154 154 if prevtags:
155 155 fp.write(prevtags)
156 156
157 157 # committed tags are stored in UTF-8
158 158 writetag(fp, name, util.fromlocal, prevtags)
159 159
160 160 if use_dirstate and '.hgtags' not in self.dirstate:
161 161 self.add(['.hgtags'])
162 162
163 163 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
164 164 extra=extra)
165 165
166 166 self.hook('tag', node=hex(node), tag=name, local=local)
167 167
168 168 return tagnode
169 169
170 170 def tag(self, name, node, message, local, user, date):
171 171 '''tag a revision with a symbolic name.
172 172
173 173 if local is True, the tag is stored in a per-repository file.
174 174 otherwise, it is stored in the .hgtags file, and a new
175 175 changeset is committed with the change.
176 176
177 177 keyword arguments:
178 178
179 179 local: whether to store tag in non-version-controlled file
180 180 (default False)
181 181
182 182 message: commit message to use if committing
183 183
184 184 user: name of user to use if committing
185 185
186 186 date: date tuple to use if committing'''
187 187
188 188 for x in self.status()[:5]:
189 189 if '.hgtags' in x:
190 190 raise util.Abort(_('working copy of .hgtags is changed '
191 191 '(please commit .hgtags manually)'))
192 192
193 193
194 194 self._tag(name, node, message, local, user, date)
195 195
196 196 def tags(self):
197 197 '''return a mapping of tag to node'''
198 198 if self.tagscache:
199 199 return self.tagscache
200 200
201 201 globaltags = {}
202 202
203 203 def readtags(lines, fn):
204 204 filetags = {}
205 205 count = 0
206 206
207 207 def warn(msg):
208 208 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
209 209
210 210 for l in lines:
211 211 count += 1
212 212 if not l:
213 213 continue
214 214 s = l.split(" ", 1)
215 215 if len(s) != 2:
216 216 warn(_("cannot parse entry"))
217 217 continue
218 218 node, key = s
219 219 key = util.tolocal(key.strip()) # stored in UTF-8
220 220 try:
221 221 bin_n = bin(node)
222 222 except TypeError:
223 223 warn(_("node '%s' is not well formed") % node)
224 224 continue
225 225 if bin_n not in self.changelog.nodemap:
226 226 warn(_("tag '%s' refers to unknown node") % key)
227 227 continue
228 228
229 229 h = []
230 230 if key in filetags:
231 231 n, h = filetags[key]
232 232 h.append(n)
233 233 filetags[key] = (bin_n, h)
234 234
235 235 for k, nh in filetags.items():
236 236 if k not in globaltags:
237 237 globaltags[k] = nh
238 238 continue
239 239 # we prefer the global tag if:
240 240 # it supersedes us OR
241 241 # mutual supersedes and it has a higher rank
242 242 # otherwise we win because we're tip-most
243 243 an, ah = nh
244 244 bn, bh = globaltags[k]
245 245 if (bn != an and an in bh and
246 246 (bn not in ah or len(bh) > len(ah))):
247 247 an = bn
248 248 ah.extend([n for n in bh if n not in ah])
249 249 globaltags[k] = an, ah
250 250
251 251 # read the tags file from each head, ending with the tip
252 252 f = None
253 253 for rev, node, fnode in self._hgtagsnodes():
254 254 f = (f and f.filectx(fnode) or
255 255 self.filectx('.hgtags', fileid=fnode))
256 256 readtags(f.data().splitlines(), f)
257 257
258 258 try:
259 259 data = util.fromlocal(self.opener("localtags").read())
260 260 # localtags are stored in the local character set
261 261 # while the internal tag table is stored in UTF-8
262 262 readtags(data.splitlines(), "localtags")
263 263 except IOError:
264 264 pass
265 265
266 266 self.tagscache = {}
267 267 for k,nh in globaltags.items():
268 268 n = nh[0]
269 269 if n != nullid:
270 270 self.tagscache[k] = n
271 271 self.tagscache['tip'] = self.changelog.tip()
272 272
273 273 return self.tagscache
274 274
275 275 def _hgtagsnodes(self):
276 276 heads = self.heads()
277 277 heads.reverse()
278 278 last = {}
279 279 ret = []
280 280 for node in heads:
281 281 c = self.changectx(node)
282 282 rev = c.rev()
283 283 try:
284 284 fnode = c.filenode('.hgtags')
285 285 except revlog.LookupError:
286 286 continue
287 287 ret.append((rev, node, fnode))
288 288 if fnode in last:
289 289 ret[last[fnode]] = None
290 290 last[fnode] = len(ret) - 1
291 291 return [item for item in ret if item]
292 292
293 293 def tagslist(self):
294 294 '''return a list of tags ordered by revision'''
295 295 l = []
296 296 for t, n in self.tags().items():
297 297 try:
298 298 r = self.changelog.rev(n)
299 299 except:
300 300 r = -2 # sort to the beginning of the list if unknown
301 301 l.append((r, t, n))
302 302 l.sort()
303 303 return [(t, n) for r, t, n in l]
304 304
305 305 def nodetags(self, node):
306 306 '''return the tags associated with a node'''
307 307 if not self.nodetagscache:
308 308 self.nodetagscache = {}
309 309 for t, n in self.tags().items():
310 310 self.nodetagscache.setdefault(n, []).append(t)
311 311 return self.nodetagscache.get(node, [])
312 312
313 313 def _branchtags(self):
314 314 partial, last, lrev = self._readbranchcache()
315 315
316 316 tiprev = self.changelog.count() - 1
317 317 if lrev != tiprev:
318 318 self._updatebranchcache(partial, lrev+1, tiprev+1)
319 319 self._writebranchcache(partial, self.changelog.tip(), tiprev)
320 320
321 321 return partial
322 322
323 323 def branchtags(self):
324 324 if self.branchcache is not None:
325 325 return self.branchcache
326 326
327 327 self.branchcache = {} # avoid recursion in changectx
328 328 partial = self._branchtags()
329 329
330 330 # the branch cache is stored on disk as UTF-8, but in the local
331 331 # charset internally
332 332 for k, v in partial.items():
333 333 self.branchcache[util.tolocal(k)] = v
334 334 return self.branchcache
335 335
336 336 def _readbranchcache(self):
337 337 partial = {}
338 338 try:
339 339 f = self.opener("branch.cache")
340 340 lines = f.read().split('\n')
341 341 f.close()
342 342 except (IOError, OSError):
343 343 return {}, nullid, nullrev
344 344
345 345 try:
346 346 last, lrev = lines.pop(0).split(" ", 1)
347 347 last, lrev = bin(last), int(lrev)
348 348 if not (lrev < self.changelog.count() and
349 349 self.changelog.node(lrev) == last): # sanity check
350 350 # invalidate the cache
351 351 raise ValueError('Invalid branch cache: unknown tip')
352 352 for l in lines:
353 353 if not l: continue
354 354 node, label = l.split(" ", 1)
355 355 partial[label.strip()] = bin(node)
356 356 except (KeyboardInterrupt, util.SignalInterrupt):
357 357 raise
358 358 except Exception, inst:
359 359 if self.ui.debugflag:
360 360 self.ui.warn(str(inst), '\n')
361 361 partial, last, lrev = {}, nullid, nullrev
362 362 return partial, last, lrev
363 363
364 364 def _writebranchcache(self, branches, tip, tiprev):
365 365 try:
366 366 f = self.opener("branch.cache", "w", atomictemp=True)
367 367 f.write("%s %s\n" % (hex(tip), tiprev))
368 368 for label, node in branches.iteritems():
369 369 f.write("%s %s\n" % (hex(node), label))
370 370 f.rename()
371 371 except (IOError, OSError):
372 372 pass
373 373
374 374 def _updatebranchcache(self, partial, start, end):
375 375 for r in xrange(start, end):
376 376 c = self.changectx(r)
377 377 b = c.branch()
378 378 partial[b] = c.node()
379 379
380 380 def lookup(self, key):
381 381 if key == '.':
382 382 key, second = self.dirstate.parents()
383 383 if key == nullid:
384 384 raise repo.RepoError(_("no revision checked out"))
385 385 if second != nullid:
386 386 self.ui.warn(_("warning: working directory has two parents, "
387 387 "tag '.' uses the first\n"))
388 388 elif key == 'null':
389 389 return nullid
390 390 n = self.changelog._match(key)
391 391 if n:
392 392 return n
393 393 if key in self.tags():
394 394 return self.tags()[key]
395 395 if key in self.branchtags():
396 396 return self.branchtags()[key]
397 397 n = self.changelog._partialmatch(key)
398 398 if n:
399 399 return n
400 400 try:
401 401 if len(key) == 20:
402 402 key = hex(key)
403 403 except:
404 404 pass
405 405 raise repo.RepoError(_("unknown revision '%s'") % key)
406 406
407 407 def dev(self):
408 408 return os.lstat(self.path).st_dev
409 409
410 410 def local(self):
411 411 return True
412 412
413 413 def join(self, f):
414 414 return os.path.join(self.path, f)
415 415
416 416 def sjoin(self, f):
417 417 f = self.encodefn(f)
418 418 return os.path.join(self.spath, f)
419 419
420 420 def wjoin(self, f):
421 421 return os.path.join(self.root, f)
422 422
423 423 def file(self, f):
424 424 if f[0] == '/':
425 425 f = f[1:]
426 426 return filelog.filelog(self.sopener, f)
427 427
428 428 def changectx(self, changeid=None):
429 429 return context.changectx(self, changeid)
430 430
431 431 def workingctx(self):
432 432 return context.workingctx(self)
433 433
434 434 def parents(self, changeid=None):
435 435 '''
436 436 get list of changectxs for parents of changeid or working directory
437 437 '''
438 438 if changeid is None:
439 439 pl = self.dirstate.parents()
440 440 else:
441 441 n = self.changelog.lookup(changeid)
442 442 pl = self.changelog.parents(n)
443 443 if pl[1] == nullid:
444 444 return [self.changectx(pl[0])]
445 445 return [self.changectx(pl[0]), self.changectx(pl[1])]
446 446
447 447 def filectx(self, path, changeid=None, fileid=None):
448 448 """changeid can be a changeset revision, node, or tag.
449 449 fileid can be a file revision or node."""
450 450 return context.filectx(self, path, changeid, fileid)
451 451
452 452 def getcwd(self):
453 453 return self.dirstate.getcwd()
454 454
455 455 def pathto(self, f, cwd=None):
456 456 return self.dirstate.pathto(f, cwd)
457 457
458 458 def wfile(self, f, mode='r'):
459 459 return self.wopener(f, mode)
460 460
461 461 def _link(self, f):
462 462 return os.path.islink(self.wjoin(f))
463 463
464 464 def _filter(self, filter, filename, data):
465 465 if filter not in self.filterpats:
466 466 l = []
467 467 for pat, cmd in self.ui.configitems(filter):
468 468 mf = util.matcher(self.root, "", [pat], [], [])[1]
469 469 l.append((mf, cmd))
470 470 self.filterpats[filter] = l
471 471
472 472 for mf, cmd in self.filterpats[filter]:
473 473 if mf(filename):
474 474 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
475 475 data = util.filter(data, cmd)
476 476 break
477 477
478 478 return data
479 479
480 480 def wread(self, filename):
481 481 if self._link(filename):
482 482 data = os.readlink(self.wjoin(filename))
483 483 else:
484 484 data = self.wopener(filename, 'r').read()
485 485 return self._filter("encode", filename, data)
486 486
487 487 def wwrite(self, filename, data, flags):
488 488 data = self._filter("decode", filename, data)
489 489 if "l" in flags:
490 490 self.wopener.symlink(data, filename)
491 491 else:
492 492 try:
493 493 if self._link(filename):
494 494 os.unlink(self.wjoin(filename))
495 495 except OSError:
496 496 pass
497 497 self.wopener(filename, 'w').write(data)
498 498 util.set_exec(self.wjoin(filename), "x" in flags)
499 499
500 500 def wwritedata(self, filename, data):
501 501 return self._filter("decode", filename, data)
502 502
503 503 def transaction(self):
504 504 if self._transref and self._transref():
505 505 return self._transref().nest()
506 506
507 507 # save dirstate for rollback
508 508 try:
509 509 ds = self.opener("dirstate").read()
510 510 except IOError:
511 511 ds = ""
512 512 self.opener("journal.dirstate", "w").write(ds)
513 513
514 514 renames = [(self.sjoin("journal"), self.sjoin("undo")),
515 515 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
516 516 tr = transaction.transaction(self.ui.warn, self.sopener,
517 517 self.sjoin("journal"),
518 518 aftertrans(renames))
519 519 self._transref = weakref.ref(tr)
520 520 return tr
521 521
522 522 def recover(self):
523 523 l = self.lock()
524 524 try:
525 525 if os.path.exists(self.sjoin("journal")):
526 526 self.ui.status(_("rolling back interrupted transaction\n"))
527 527 transaction.rollback(self.sopener, self.sjoin("journal"))
528 528 self.invalidate()
529 529 return True
530 530 else:
531 531 self.ui.warn(_("no interrupted transaction available\n"))
532 532 return False
533 533 finally:
534 534 del l
535 535
536 536 def rollback(self):
537 537 wlock = lock = None
538 538 try:
539 539 wlock = self.wlock()
540 540 lock = self.lock()
541 541 if os.path.exists(self.sjoin("undo")):
542 542 self.ui.status(_("rolling back last transaction\n"))
543 543 transaction.rollback(self.sopener, self.sjoin("undo"))
544 544 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
545 545 self.invalidate()
546 546 self.dirstate.invalidate()
547 547 else:
548 548 self.ui.warn(_("no rollback information available\n"))
549 549 finally:
550 550 del lock, wlock
551 551
552 552 def invalidate(self):
553 553 for a in "changelog manifest".split():
554 554 if hasattr(self, a):
555 555 self.__delattr__(a)
556 556 self.tagscache = None
557 557 self.nodetagscache = None
558 558
559 559 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
560 560 try:
561 561 l = lock.lock(lockname, 0, releasefn, desc=desc)
562 562 except lock.LockHeld, inst:
563 563 if not wait:
564 564 raise
565 565 self.ui.warn(_("waiting for lock on %s held by %r\n") %
566 566 (desc, inst.locker))
567 567 # default to 600 seconds timeout
568 568 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
569 569 releasefn, desc=desc)
570 570 if acquirefn:
571 571 acquirefn()
572 572 return l
573 573
574 574 def lock(self, wait=True):
575 575 if self._lockref and self._lockref():
576 576 return self._lockref()
577 577
578 578 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
579 579 _('repository %s') % self.origroot)
580 580 self._lockref = weakref.ref(l)
581 581 return l
582 582
583 583 def wlock(self, wait=True):
584 584 if self._wlockref and self._wlockref():
585 585 return self._wlockref()
586 586
587 587 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
588 588 self.dirstate.invalidate, _('working directory of %s') %
589 589 self.origroot)
590 590 self._wlockref = weakref.ref(l)
591 591 return l
592 592
593 593 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
594 594 """
595 595 commit an individual file as part of a larger transaction
596 596 """
597 597
598 598 t = self.wread(fn)
599 599 fl = self.file(fn)
600 600 fp1 = manifest1.get(fn, nullid)
601 601 fp2 = manifest2.get(fn, nullid)
602 602
603 603 meta = {}
604 604 cp = self.dirstate.copied(fn)
605 605 if cp:
606 606 # Mark the new revision of this file as a copy of another
607 607 # file. This copy data will effectively act as a parent
608 608 # of this new revision. If this is a merge, the first
609 609 # parent will be the nullid (meaning "look up the copy data")
610 610 # and the second one will be the other parent. For example:
611 611 #
612 612 # 0 --- 1 --- 3 rev1 changes file foo
613 613 # \ / rev2 renames foo to bar and changes it
614 614 # \- 2 -/ rev3 should have bar with all changes and
615 615 # should record that bar descends from
616 616 # bar in rev2 and foo in rev1
617 617 #
618 618 # this allows this merge to succeed:
619 619 #
620 620 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
621 621 # \ / merging rev3 and rev4 should use bar@rev2
622 622 # \- 2 --- 4 as the merge base
623 623 #
624 624 meta["copy"] = cp
625 625 if not manifest2: # not a branch merge
626 626 meta["copyrev"] = hex(manifest1.get(cp, nullid))
627 627 fp2 = nullid
628 628 elif fp2 != nullid: # copied on remote side
629 629 meta["copyrev"] = hex(manifest1.get(cp, nullid))
630 630 elif fp1 != nullid: # copied on local side, reversed
631 631 meta["copyrev"] = hex(manifest2.get(cp))
632 632 fp2 = fp1
633 633 elif cp in manifest2: # directory rename on local side
634 634 meta["copyrev"] = hex(manifest2[cp])
635 635 else: # directory rename on remote side
636 636 meta["copyrev"] = hex(manifest1.get(cp, nullid))
637 637 self.ui.debug(_(" %s: copy %s:%s\n") %
638 638 (fn, cp, meta["copyrev"]))
639 639 fp1 = nullid
640 640 elif fp2 != nullid:
641 641 # is one parent an ancestor of the other?
642 642 fpa = fl.ancestor(fp1, fp2)
643 643 if fpa == fp1:
644 644 fp1, fp2 = fp2, nullid
645 645 elif fpa == fp2:
646 646 fp2 = nullid
647 647
648 648 # is the file unmodified from the parent? report existing entry
649 649 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
650 650 return fp1
651 651
652 652 changelist.append(fn)
653 653 return fl.add(t, meta, tr, linkrev, fp1, fp2)
654 654
655 655 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
656 656 if p1 is None:
657 657 p1, p2 = self.dirstate.parents()
658 658 return self.commit(files=files, text=text, user=user, date=date,
659 659 p1=p1, p2=p2, extra=extra, empty_ok=True)
660 660
661 661 def commit(self, files=None, text="", user=None, date=None,
662 662 match=util.always, force=False, force_editor=False,
663 663 p1=None, p2=None, extra={}, empty_ok=False):
664 664 wlock = lock = tr = None
665 665 try:
666 666 commit = []
667 667 remove = []
668 668 changed = []
669 669 use_dirstate = (p1 is None) # not rawcommit
670 670 extra = extra.copy()
671 671
672 672 if use_dirstate:
673 673 if files:
674 674 for f in files:
675 675 s = self.dirstate[f]
676 676 if s in 'nma':
677 677 commit.append(f)
678 678 elif s == 'r':
679 679 remove.append(f)
680 680 else:
681 681 self.ui.warn(_("%s not tracked!\n") % f)
682 682 else:
683 683 changes = self.status(match=match)[:5]
684 684 modified, added, removed, deleted, unknown = changes
685 685 commit = modified + added
686 686 remove = removed
687 687 else:
688 688 commit = files
689 689
690 690 if use_dirstate:
691 691 p1, p2 = self.dirstate.parents()
692 692 update_dirstate = True
693 693 else:
694 694 p1, p2 = p1, p2 or nullid
695 695 update_dirstate = (self.dirstate.parents()[0] == p1)
696 696
697 697 c1 = self.changelog.read(p1)
698 698 c2 = self.changelog.read(p2)
699 699 m1 = self.manifest.read(c1[0]).copy()
700 700 m2 = self.manifest.read(c2[0])
701 701
702 702 if use_dirstate:
703 703 branchname = self.workingctx().branch()
704 704 try:
705 705 branchname = branchname.decode('UTF-8').encode('UTF-8')
706 706 except UnicodeDecodeError:
707 707 raise util.Abort(_('branch name not in UTF-8!'))
708 708 else:
709 709 branchname = ""
710 710
711 711 if use_dirstate:
712 712 oldname = c1[5].get("branch") # stored in UTF-8
713 713 if (not commit and not remove and not force and p2 == nullid
714 714 and branchname == oldname):
715 715 self.ui.status(_("nothing changed\n"))
716 716 return None
717 717
718 718 xp1 = hex(p1)
719 719 if p2 == nullid: xp2 = ''
720 720 else: xp2 = hex(p2)
721 721
722 722 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
723 723
724 724 wlock = self.wlock()
725 725 lock = self.lock()
726 726 tr = self.transaction()
727 727 trp = weakref.proxy(tr)
728 728
729 729 # check in files
730 730 new = {}
731 731 linkrev = self.changelog.count()
732 732 commit.sort()
733 733 is_exec = util.execfunc(self.root, m1.execf)
734 734 is_link = util.linkfunc(self.root, m1.linkf)
735 735 for f in commit:
736 736 self.ui.note(f + "\n")
737 737 try:
738 738 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
739 739 new_exec = is_exec(f)
740 740 new_link = is_link(f)
741 741 if ((not changed or changed[-1] != f) and
742 742 m2.get(f) != new[f]):
743 743 # mention the file in the changelog if some
744 744 # flag changed, even if there was no content
745 745 # change.
746 746 old_exec = m1.execf(f)
747 747 old_link = m1.linkf(f)
748 748 if old_exec != new_exec or old_link != new_link:
749 749 changed.append(f)
750 750 m1.set(f, new_exec, new_link)
751 751 except (OSError, IOError):
752 752 if use_dirstate:
753 753 self.ui.warn(_("trouble committing %s!\n") % f)
754 754 raise
755 755 else:
756 756 remove.append(f)
757 757
758 758 # update manifest
759 759 m1.update(new)
760 760 remove.sort()
761 761 removed = []
762 762
763 763 for f in remove:
764 764 if f in m1:
765 765 del m1[f]
766 766 removed.append(f)
767 767 elif f in m2:
768 768 removed.append(f)
769 769 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
770 770 (new, removed))
771 771
772 772 # add changeset
773 773 new = new.keys()
774 774 new.sort()
775 775
776 776 user = user or self.ui.username()
777 777 if (not empty_ok and not text) or force_editor:
778 778 edittext = []
779 779 if text:
780 780 edittext.append(text)
781 781 edittext.append("")
782 782 edittext.append("HG: user: %s" % user)
783 783 if p2 != nullid:
784 784 edittext.append("HG: branch merge")
785 785 if branchname:
786 786 edittext.append("HG: branch %s" % util.tolocal(branchname))
787 787 edittext.extend(["HG: changed %s" % f for f in changed])
788 788 edittext.extend(["HG: removed %s" % f for f in removed])
789 789 if not changed and not remove:
790 790 edittext.append("HG: no files changed")
791 791 edittext.append("")
792 792 # run editor in the repository root
793 793 olddir = os.getcwd()
794 794 os.chdir(self.root)
795 795 text = self.ui.edit("\n".join(edittext), user)
796 796 os.chdir(olddir)
797 797
798 798 if branchname:
799 799 extra["branch"] = branchname
800 800
801 801 if use_dirstate:
802 802 lines = [line.rstrip() for line in text.rstrip().splitlines()]
803 803 while lines and not lines[0]:
804 804 del lines[0]
805 805 if not lines:
806 806 return None
807 807 text = '\n'.join(lines)
808 808
809 809 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
810 810 user, date, extra)
811 811 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
812 812 parent2=xp2)
813 813 tr.close()
814 814
815 815 if self.branchcache and "branch" in extra:
816 816 self.branchcache[util.tolocal(extra["branch"])] = n
817 817
818 818 if use_dirstate or update_dirstate:
819 819 self.dirstate.setparents(n)
820 820 if use_dirstate:
821 821 for f in new:
822 822 self.dirstate.normal(f)
823 823 for f in removed:
824 824 self.dirstate.forget(f)
825 825
826 826 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
827 827 return n
828 828 finally:
829 829 del tr, lock, wlock
830 830
831 831 def walk(self, node=None, files=[], match=util.always, badmatch=None):
832 832 '''
833 833 walk recursively through the directory tree or a given
834 834 changeset, finding all files matched by the match
835 835 function
836 836
837 837 results are yielded in a tuple (src, filename), where src
838 838 is one of:
839 839 'f' the file was found in the directory tree
840 840 'm' the file was only in the dirstate and not in the tree
841 841 'b' file was not found and matched badmatch
842 842 '''
843 843
844 844 if node:
845 845 fdict = dict.fromkeys(files)
846 846 # for dirstate.walk, files=['.'] means "walk the whole tree".
847 847 # follow that here, too
848 848 fdict.pop('.', None)
849 849 mdict = self.manifest.read(self.changelog.read(node)[0])
850 850 mfiles = mdict.keys()
851 851 mfiles.sort()
852 852 for fn in mfiles:
853 853 for ffn in fdict:
854 854 # match if the file is the exact name or a directory
855 855 if ffn == fn or fn.startswith("%s/" % ffn):
856 856 del fdict[ffn]
857 857 break
858 858 if match(fn):
859 859 yield 'm', fn
860 860 ffiles = fdict.keys()
861 861 ffiles.sort()
862 862 for fn in ffiles:
863 863 if badmatch and badmatch(fn):
864 864 if match(fn):
865 865 yield 'b', fn
866 866 else:
867 867 self.ui.warn(_('%s: No such file in rev %s\n')
868 868 % (self.pathto(fn), short(node)))
869 869 else:
870 870 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
871 871 yield src, fn
872 872
873 873 def status(self, node1=None, node2=None, files=[], match=util.always,
874 874 list_ignored=False, list_clean=False):
875 875 """return status of files between two nodes or node and working directory
876 876
877 877 If node1 is None, use the first dirstate parent instead.
878 878 If node2 is None, compare node1 with working directory.
879 879 """
880 880
881 881 def fcmp(fn, getnode):
882 882 t1 = self.wread(fn)
883 883 return self.file(fn).cmp(getnode(fn), t1)
884 884
885 885 def mfmatches(node):
886 886 change = self.changelog.read(node)
887 887 mf = self.manifest.read(change[0]).copy()
888 888 for fn in mf.keys():
889 889 if not match(fn):
890 890 del mf[fn]
891 891 return mf
892 892
893 893 modified, added, removed, deleted, unknown = [], [], [], [], []
894 894 ignored, clean = [], []
895 895
896 896 compareworking = False
897 897 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
898 898 compareworking = True
899 899
900 900 if not compareworking:
901 901 # read the manifest from node1 before the manifest from node2,
902 902 # so that we'll hit the manifest cache if we're going through
903 903 # all the revisions in parent->child order.
904 904 mf1 = mfmatches(node1)
905 905
906 906 # are we comparing the working directory?
907 907 if not node2:
908 908 (lookup, modified, added, removed, deleted, unknown,
909 909 ignored, clean) = self.dirstate.status(files, match,
910 910 list_ignored, list_clean)
911 911
912 912 # are we comparing working dir against its parent?
913 913 if compareworking:
914 914 if lookup:
915 915 fixup = []
916 916 # do a full compare of any files that might have changed
917 917 ctx = self.changectx()
918 918 for f in lookup:
919 919 if f not in ctx or ctx[f].cmp(self.wread(f)):
920 920 modified.append(f)
921 921 else:
922 922 fixup.append(f)
923 923 if list_clean:
924 924 clean.append(f)
925 925
926 926 # update dirstate for files that are actually clean
927 927 if fixup:
928 928 wlock = None
929 929 try:
930 930 try:
931 931 wlock = self.wlock(False)
932 932 except lock.LockException:
933 933 pass
934 934 if wlock:
935 935 for f in fixup:
936 936 self.dirstate.normal(f)
937 937 finally:
938 938 del wlock
939 939 else:
940 940 # we are comparing working dir against non-parent
941 941 # generate a pseudo-manifest for the working dir
942 942 # XXX: create it in dirstate.py ?
943 943 mf2 = mfmatches(self.dirstate.parents()[0])
944 944 is_exec = util.execfunc(self.root, mf2.execf)
945 945 is_link = util.linkfunc(self.root, mf2.linkf)
946 946 for f in lookup + modified + added:
947 947 mf2[f] = ""
948 948 mf2.set(f, is_exec(f), is_link(f))
949 949 for f in removed:
950 950 if f in mf2:
951 951 del mf2[f]
952 952
953 953 else:
954 954 # we are comparing two revisions
955 955 mf2 = mfmatches(node2)
956 956
957 957 if not compareworking:
958 958 # flush lists from dirstate before comparing manifests
959 959 modified, added, clean = [], [], []
960 960
961 961 # make sure to sort the files so we talk to the disk in a
962 962 # reasonable order
963 963 mf2keys = mf2.keys()
964 964 mf2keys.sort()
965 965 getnode = lambda fn: mf1.get(fn, nullid)
966 966 for fn in mf2keys:
967 967 if mf1.has_key(fn):
968 968 if (mf1.flags(fn) != mf2.flags(fn) or
969 969 (mf1[fn] != mf2[fn] and
970 970 (mf2[fn] != "" or fcmp(fn, getnode)))):
971 971 modified.append(fn)
972 972 elif list_clean:
973 973 clean.append(fn)
974 974 del mf1[fn]
975 975 else:
976 976 added.append(fn)
977 977
978 978 removed = mf1.keys()
979 979
980 980 # sort and return results:
981 981 for l in modified, added, removed, deleted, unknown, ignored, clean:
982 982 l.sort()
983 983 return (modified, added, removed, deleted, unknown, ignored, clean)
984 984
985 985 def add(self, list):
986 986 wlock = self.wlock()
987 987 try:
988 988 for f in list:
989 989 p = self.wjoin(f)
990 990 try:
991 991 st = os.lstat(p)
992 992 except:
993 993 self.ui.warn(_("%s does not exist!\n") % f)
994 994 continue
995 995 if st.st_size > 10000000:
996 996 self.ui.warn(_("%s: files over 10MB may cause memory and"
997 997 " performance problems\n"
998 998 "(use 'hg revert %s' to unadd the file)\n")
999 999 % (f, f))
1000 1000 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1001 1001 self.ui.warn(_("%s not added: only files and symlinks "
1002 1002 "supported currently\n") % f)
1003 1003 elif self.dirstate[f] in 'amn':
1004 1004 self.ui.warn(_("%s already tracked!\n") % f)
1005 1005 elif self.dirstate[f] == 'r':
1006 1006 self.dirstate.normallookup(f)
1007 1007 else:
1008 1008 self.dirstate.add(f)
1009 1009 finally:
1010 1010 del wlock
1011 1011
1012 1012 def forget(self, list):
1013 1013 wlock = self.wlock()
1014 1014 try:
1015 1015 for f in list:
1016 1016 if self.dirstate[f] != 'a':
1017 1017 self.ui.warn(_("%s not added!\n") % f)
1018 1018 else:
1019 1019 self.dirstate.forget(f)
1020 1020 finally:
1021 1021 del wlock
1022 1022
1023 1023 def remove(self, list, unlink=False):
1024 1024 wlock = None
1025 1025 try:
1026 1026 if unlink:
1027 1027 for f in list:
1028 1028 try:
1029 1029 util.unlink(self.wjoin(f))
1030 1030 except OSError, inst:
1031 1031 if inst.errno != errno.ENOENT:
1032 1032 raise
1033 1033 wlock = self.wlock()
1034 1034 for f in list:
1035 1035 if unlink and os.path.exists(self.wjoin(f)):
1036 1036 self.ui.warn(_("%s still exists!\n") % f)
1037 1037 elif self.dirstate[f] == 'a':
1038 1038 self.dirstate.forget(f)
1039 1039 elif f not in self.dirstate:
1040 1040 self.ui.warn(_("%s not tracked!\n") % f)
1041 1041 else:
1042 1042 self.dirstate.remove(f)
1043 1043 finally:
1044 1044 del wlock
1045 1045
1046 1046 def undelete(self, list):
1047 1047 wlock = None
1048 1048 try:
1049 1049 p = self.dirstate.parents()[0]
1050 1050 mn = self.changelog.read(p)[0]
1051 1051 m = self.manifest.read(mn)
1052 1052 wlock = self.wlock()
1053 1053 for f in list:
1054 1054 if self.dirstate[f] != 'r':
1055 1055 self.ui.warn("%s not removed!\n" % f)
1056 1056 else:
1057 1057 t = self.file(f).read(m[f])
1058 1058 self.wwrite(f, t, m.flags(f))
1059 1059 self.dirstate.normal(f)
1060 1060 finally:
1061 1061 del wlock
1062 1062
1063 1063 def copy(self, source, dest):
1064 1064 wlock = None
1065 1065 try:
1066 1066 p = self.wjoin(dest)
1067 1067 if not (os.path.exists(p) or os.path.islink(p)):
1068 1068 self.ui.warn(_("%s does not exist!\n") % dest)
1069 1069 elif not (os.path.isfile(p) or os.path.islink(p)):
1070 1070 self.ui.warn(_("copy failed: %s is not a file or a "
1071 1071 "symbolic link\n") % dest)
1072 1072 else:
1073 1073 wlock = self.wlock()
1074 1074 if dest not in self.dirstate:
1075 1075 self.dirstate.add(dest)
1076 1076 self.dirstate.copy(source, dest)
1077 1077 finally:
1078 1078 del wlock
1079 1079
1080 1080 def heads(self, start=None):
1081 1081 heads = self.changelog.heads(start)
1082 1082 # sort the output in rev descending order
1083 1083 heads = [(-self.changelog.rev(h), h) for h in heads]
1084 1084 heads.sort()
1085 1085 return [n for (r, n) in heads]
1086 1086
1087 1087 def branchheads(self, branch, start=None):
1088 1088 branches = self.branchtags()
1089 1089 if branch not in branches:
1090 1090 return []
1091 1091 # The basic algorithm is this:
1092 1092 #
1093 1093 # Start from the branch tip since there are no later revisions that can
1094 1094 # possibly be in this branch, and the tip is a guaranteed head.
1095 1095 #
1096 1096 # Remember the tip's parents as the first ancestors, since these by
1097 1097 # definition are not heads.
1098 1098 #
1099 1099 # Step backwards from the branch tip through all the revisions. We are
1100 1100 # guaranteed by the rules of Mercurial that we will now be visiting the
1101 1101 # nodes in reverse topological order (children before parents).
1102 1102 #
1103 1103 # If a revision is one of the ancestors of a head then we can toss it
1104 1104 # out of the ancestors set (we've already found it and won't be
1105 1105 # visiting it again) and put its parents in the ancestors set.
1106 1106 #
1107 1107 # Otherwise, if a revision is in the branch it's another head, since it
1108 1108 # wasn't in the ancestor list of an existing head. So add it to the
1109 1109 # head list, and add its parents to the ancestor list.
1110 1110 #
1111 1111 # If it is not in the branch ignore it.
1112 1112 #
1113 1113 # Once we have a list of heads, use nodesbetween to filter out all the
1114 1114 # heads that cannot be reached from startrev. There may be a more
1115 1115 # efficient way to do this as part of the previous algorithm.
1116 1116
1117 1117 set = util.set
1118 1118 heads = [self.changelog.rev(branches[branch])]
1119 1119 # Don't care if ancestors contains nullrev or not.
1120 1120 ancestors = set(self.changelog.parentrevs(heads[0]))
1121 1121 for rev in xrange(heads[0] - 1, nullrev, -1):
1122 1122 if rev in ancestors:
1123 1123 ancestors.update(self.changelog.parentrevs(rev))
1124 1124 ancestors.remove(rev)
1125 1125 elif self.changectx(rev).branch() == branch:
1126 1126 heads.append(rev)
1127 1127 ancestors.update(self.changelog.parentrevs(rev))
1128 1128 heads = [self.changelog.node(rev) for rev in heads]
1129 1129 if start is not None:
1130 1130 heads = self.changelog.nodesbetween([start], heads)[2]
1131 1131 return heads
1132 1132
1133 1133 def branches(self, nodes):
1134 1134 if not nodes:
1135 1135 nodes = [self.changelog.tip()]
1136 1136 b = []
1137 1137 for n in nodes:
1138 1138 t = n
1139 1139 while 1:
1140 1140 p = self.changelog.parents(n)
1141 1141 if p[1] != nullid or p[0] == nullid:
1142 1142 b.append((t, n, p[0], p[1]))
1143 1143 break
1144 1144 n = p[0]
1145 1145 return b
1146 1146
1147 1147 def between(self, pairs):
1148 1148 r = []
1149 1149
1150 1150 for top, bottom in pairs:
1151 1151 n, l, i = top, [], 0
1152 1152 f = 1
1153 1153
1154 1154 while n != bottom:
1155 1155 p = self.changelog.parents(n)[0]
1156 1156 if i == f:
1157 1157 l.append(n)
1158 1158 f = f * 2
1159 1159 n = p
1160 1160 i += 1
1161 1161
1162 1162 r.append(l)
1163 1163
1164 1164 return r
1165 1165
1166 1166 def findincoming(self, remote, base=None, heads=None, force=False):
1167 1167 """Return list of roots of the subsets of missing nodes from remote
1168 1168
1169 1169 If base dict is specified, assume that these nodes and their parents
1170 1170 exist on the remote side and that no child of a node of base exists
1171 1171 in both remote and self.
1172 1172 Furthermore base will be updated to include the nodes that exist
1173 1173 in both self and remote but whose children exist in neither.
1174 1174 If a list of heads is specified, return only nodes which are heads
1175 1175 or ancestors of these heads.
1176 1176
1177 1177 All the ancestors of base are in self and in remote.
1178 1178 All the descendants of the list returned are missing in self.
1179 1179 (and so we know that the rest of the nodes are missing in remote, see
1180 1180 outgoing)
1181 1181 """
1182 1182 m = self.changelog.nodemap
1183 1183 search = []
1184 1184 fetch = {}
1185 1185 seen = {}
1186 1186 seenbranch = {}
1187 1187 if base == None:
1188 1188 base = {}
1189 1189
1190 1190 if not heads:
1191 1191 heads = remote.heads()
1192 1192
1193 1193 if self.changelog.tip() == nullid:
1194 1194 base[nullid] = 1
1195 1195 if heads != [nullid]:
1196 1196 return [nullid]
1197 1197 return []
1198 1198
1199 1199 # assume we're closer to the tip than the root
1200 1200 # and start by examining the heads
1201 1201 self.ui.status(_("searching for changes\n"))
1202 1202
1203 1203 unknown = []
1204 1204 for h in heads:
1205 1205 if h not in m:
1206 1206 unknown.append(h)
1207 1207 else:
1208 1208 base[h] = 1
1209 1209
1210 1210 if not unknown:
1211 1211 return []
1212 1212
1213 1213 req = dict.fromkeys(unknown)
1214 1214 reqcnt = 0
1215 1215
1216 1216 # search through remote branches
1217 1217 # a 'branch' here is a linear segment of history, with four parts:
1218 1218 # head, root, first parent, second parent
1219 1219 # (a branch always has two parents (or none) by definition)
1220 1220 unknown = remote.branches(unknown)
1221 1221 while unknown:
1222 1222 r = []
1223 1223 while unknown:
1224 1224 n = unknown.pop(0)
1225 1225 if n[0] in seen:
1226 1226 continue
1227 1227
1228 1228 self.ui.debug(_("examining %s:%s\n")
1229 1229 % (short(n[0]), short(n[1])))
1230 1230 if n[0] == nullid: # found the end of the branch
1231 1231 pass
1232 1232 elif n in seenbranch:
1233 1233 self.ui.debug(_("branch already found\n"))
1234 1234 continue
1235 1235 elif n[1] and n[1] in m: # do we know the base?
1236 1236 self.ui.debug(_("found incomplete branch %s:%s\n")
1237 1237 % (short(n[0]), short(n[1])))
1238 1238 search.append(n) # schedule branch range for scanning
1239 1239 seenbranch[n] = 1
1240 1240 else:
1241 1241 if n[1] not in seen and n[1] not in fetch:
1242 1242 if n[2] in m and n[3] in m:
1243 1243 self.ui.debug(_("found new changeset %s\n") %
1244 1244 short(n[1]))
1245 1245 fetch[n[1]] = 1 # earliest unknown
1246 1246 for p in n[2:4]:
1247 1247 if p in m:
1248 1248 base[p] = 1 # latest known
1249 1249
1250 1250 for p in n[2:4]:
1251 1251 if p not in req and p not in m:
1252 1252 r.append(p)
1253 1253 req[p] = 1
1254 1254 seen[n[0]] = 1
1255 1255
1256 1256 if r:
1257 1257 reqcnt += 1
1258 1258 self.ui.debug(_("request %d: %s\n") %
1259 1259 (reqcnt, " ".join(map(short, r))))
1260 1260 for p in xrange(0, len(r), 10):
1261 1261 for b in remote.branches(r[p:p+10]):
1262 1262 self.ui.debug(_("received %s:%s\n") %
1263 1263 (short(b[0]), short(b[1])))
1264 1264 unknown.append(b)
1265 1265
1266 1266 # do binary search on the branches we found
1267 1267 while search:
1268 1268 n = search.pop(0)
1269 1269 reqcnt += 1
1270 1270 l = remote.between([(n[0], n[1])])[0]
1271 1271 l.append(n[1])
1272 1272 p = n[0]
1273 1273 f = 1
1274 1274 for i in l:
1275 1275 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1276 1276 if i in m:
1277 1277 if f <= 2:
1278 1278 self.ui.debug(_("found new branch changeset %s\n") %
1279 1279 short(p))
1280 1280 fetch[p] = 1
1281 1281 base[i] = 1
1282 1282 else:
1283 1283 self.ui.debug(_("narrowed branch search to %s:%s\n")
1284 1284 % (short(p), short(i)))
1285 1285 search.append((p, i))
1286 1286 break
1287 1287 p, f = i, f * 2
1288 1288
1289 1289 # sanity check our fetch list
1290 1290 for f in fetch.keys():
1291 1291 if f in m:
1292 1292 raise repo.RepoError(_("already have changeset ") + short(f))
1293 1293
1294 1294 if base.keys() == [nullid]:
1295 1295 if force:
1296 1296 self.ui.warn(_("warning: repository is unrelated\n"))
1297 1297 else:
1298 1298 raise util.Abort(_("repository is unrelated"))
1299 1299
1300 1300 self.ui.debug(_("found new changesets starting at ") +
1301 1301 " ".join([short(f) for f in fetch]) + "\n")
1302 1302
1303 1303 self.ui.debug(_("%d total queries\n") % reqcnt)
1304 1304
1305 1305 return fetch.keys()
1306 1306
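The narrowing loop above is effectively a binary search along each incomplete branch: each round asks the remote for exponentially spaced samples, finds the first sample known locally, and either records the fetch root (once the gap is a single step, f <= 2) or recurses into the smaller range. A toy, single-machine sketch of that narrowing, reusing sample_between from the earlier sketch (known and parent are stand-ins for local membership and the remote's changelog):

def find_fetch_root(head, base, parent, known):
    """Narrow a segment whose head is unknown and whose base is
    known down to the earliest unknown node, mimicking the
    while-search loop in findincoming."""
    search = [(head, base)]
    fetch = None
    while search:
        top, bottom = search.pop(0)
        l = sample_between(top, bottom, parent) + [bottom]
        p, f = top, 1
        for i in l:
            if known(i):
                if f <= 2:
                    fetch = p              # earliest unknown node
                else:
                    search.append((p, i))  # narrow further
                break
            p, f = i, f * 2
    return fetch

# revs 0..20, the local repo knows 0..7: the fetch root is rev 8
assert find_fetch_root(20, 5, lambda r: r - 1, lambda r: r <= 7) == 8
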
1307 1307 def findoutgoing(self, remote, base=None, heads=None, force=False):
1308 1308 """Return list of nodes that are roots of subsets not in remote
1309 1309
1310 1310 If base dict is specified, assume that these nodes and their parents
1311 1311 exist on the remote side.
1312 1312 If a list of heads is specified, return only nodes which are heads
1313 1313 or ancestors of these heads, and return a second element which
1314 1314 contains all remote heads which get new children.
1315 1315 """
1316 1316 if base is None:
1317 1317 base = {}
1318 1318 self.findincoming(remote, base, heads, force=force)
1319 1319
1320 1320 self.ui.debug(_("common changesets up to ")
1321 1321 + " ".join(map(short, base.keys())) + "\n")
1322 1322
1323 1323 remain = dict.fromkeys(self.changelog.nodemap)
1324 1324
1325 1325 # prune everything remote has from the tree
1326 1326 del remain[nullid]
1327 1327 remove = base.keys()
1328 1328 while remove:
1329 1329 n = remove.pop(0)
1330 1330 if n in remain:
1331 1331 del remain[n]
1332 1332 for p in self.changelog.parents(n):
1333 1333 remove.append(p)
1334 1334
1335 1335 # find every node whose parents have been pruned
1336 1336 subset = []
1337 1337 # find every remote head that will get new children
1338 1338 updated_heads = {}
1339 1339 for n in remain:
1340 1340 p1, p2 = self.changelog.parents(n)
1341 1341 if p1 not in remain and p2 not in remain:
1342 1342 subset.append(n)
1343 1343 if heads:
1344 1344 if p1 in heads:
1345 1345 updated_heads[p1] = True
1346 1346 if p2 in heads:
1347 1347 updated_heads[p2] = True
1348 1348
1349 1349 # this is the set of all roots we have to push
1350 1350 if heads:
1351 1351 return subset, updated_heads.keys()
1352 1352 else:
1353 1353 return subset
1354 1354
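findoutgoing thus reduces to a reachability pruning: delete every node the remote has (the bases and all their ancestors), and the roots of what remains are the changesets to push. A self-contained sketch over a toy parents table (illustrative data only):

def outgoing_roots(parents, allnodes, bases):
    """Toy version of findoutgoing's pruning: drop everything the
    remote has (bases plus their ancestors), then keep the nodes
    whose parents were all pruned -- the roots of what to push."""
    remain = set(allnodes)
    remove = list(bases)
    while remove:
        n = remove.pop()
        if n in remain:
            remain.discard(n)
            remove.extend(parents[n])
    return sorted(n for n in remain
                  if not any(p in remain for p in parents[n]))

# linear history 0-1-2-3-4, remote already has everything up to 2:
parents = {0: [], 1: [0], 2: [1], 3: [2], 4: [3]}
assert outgoing_roots(parents, range(5), [2]) == [3]
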
1355 1355 def pull(self, remote, heads=None, force=False):
1356 1356 lock = self.lock()
1357 1357 try:
1358 1358 fetch = self.findincoming(remote, heads=heads, force=force)
1359 1359 if fetch == [nullid]:
1360 1360 self.ui.status(_("requesting all changes\n"))
1361 1361
1362 1362 if not fetch:
1363 1363 self.ui.status(_("no changes found\n"))
1364 1364 return 0
1365 1365
1366 1366 if heads is None:
1367 1367 cg = remote.changegroup(fetch, 'pull')
1368 1368 else:
1369 1369 if 'changegroupsubset' not in remote.capabilities:
1370 1370 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1371 1371 cg = remote.changegroupsubset(fetch, heads, 'pull')
1372 1372 return self.addchangegroup(cg, 'pull', remote.url())
1373 1373 finally:
1374 1374 del lock
1375 1375
1376 1376 def push(self, remote, force=False, revs=None):
1377 1377 # there are two ways to push to remote repo:
1378 1378 #
1379 1379 # addchangegroup assumes local user can lock remote
1380 1380 # repo (local filesystem, old ssh servers).
1381 1381 #
1382 1382 # unbundle assumes local user cannot lock remote repo (new ssh
1383 1383 # servers, http servers).
1384 1384
1385 1385 if remote.capable('unbundle'):
1386 1386 return self.push_unbundle(remote, force, revs)
1387 1387 return self.push_addchangegroup(remote, force, revs)
1388 1388
1389 1389 def prepush(self, remote, force, revs):
1390 1390 base = {}
1391 1391 remote_heads = remote.heads()
1392 1392 inc = self.findincoming(remote, base, remote_heads, force=force)
1393 1393
1394 1394 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1395 1395 if revs is not None:
1396 1396 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1397 1397 else:
1398 1398 bases, heads = update, self.changelog.heads()
1399 1399
1400 1400 if not bases:
1401 1401 self.ui.status(_("no changes found\n"))
1402 1402 return None, 1
1403 1403 elif not force:
1404 1404 # check if we're creating new remote heads
1405 1405 # to be a remote head after push, node must be either
1406 1406 # - unknown locally
1407 1407 # - a local outgoing head descended from update
1408 1408 # - a remote head that's known locally and not
1409 1409 # ancestral to an outgoing head
1410 1410
1411 1411 warn = 0
1412 1412
1413 1413 if remote_heads == [nullid]:
1414 1414 warn = 0
1415 1415 elif not revs and len(heads) > len(remote_heads):
1416 1416 warn = 1
1417 1417 else:
1418 1418 newheads = list(heads)
1419 1419 for r in remote_heads:
1420 1420 if r in self.changelog.nodemap:
1421 1421 desc = self.changelog.heads(r, heads)
1422 1422 l = [h for h in heads if h in desc]
1423 1423 if not l:
1424 1424 newheads.append(r)
1425 1425 else:
1426 1426 newheads.append(r)
1427 1427 if len(newheads) > len(remote_heads):
1428 1428 warn = 1
1429 1429
1430 1430 if warn:
1431 1431 self.ui.warn(_("abort: push creates new remote heads!\n"))
1432 1432 self.ui.status(_("(did you forget to merge?"
1433 1433 " use push -f to force)\n"))
1434 1434 return None, 1
1435 1435 elif inc:
1436 1436 self.ui.warn(_("note: unsynced remote changes!\n"))
1437 1437
1438 1438
1439 1439 if revs is None:
1440 1440 cg = self.changegroup(update, 'push')
1441 1441 else:
1442 1442 cg = self.changegroupsubset(update, revs, 'push')
1443 1443 return cg, remote_heads
1444 1444
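The head-counting logic above boils down to this: after the push, the remote's heads would be the pushed local heads plus every existing remote head that no pushed head descends from; if that total exceeds the current count, the push creates a new head. A simplified sketch (it folds the known/unknown remote-head cases together; descendants is a hypothetical callback, not the real API):

def creates_new_heads(local_heads, remote_heads, descendants):
    """Return True if pushing local_heads would leave the remote
    with more heads than it has now. descendants(r) is assumed to
    yield the set of local heads descending from remote head r."""
    newheads = list(local_heads)
    for r in remote_heads:
        if not any(h in descendants(r) for h in local_heads):
            newheads.append(r)   # r stays a head after the push
    return len(newheads) > len(remote_heads)

# remote has heads A and B; pushing X (descended from A) is fine,
# pushing an unrelated extra head Y is not:
desc = {'A': set(['X']), 'B': set([])}
assert not creates_new_heads(['X'], ['A', 'B'], lambda r: desc[r])
assert creates_new_heads(['X', 'Y'], ['A', 'B'], lambda r: desc[r])
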
1445 1445 def push_addchangegroup(self, remote, force, revs):
1446 1446 lock = remote.lock()
1447 1447 try:
1448 1448 ret = self.prepush(remote, force, revs)
1449 1449 if ret[0] is not None:
1450 1450 cg, remote_heads = ret
1451 1451 return remote.addchangegroup(cg, 'push', self.url())
1452 1452 return ret[1]
1453 1453 finally:
1454 1454 del lock
1455 1455
1456 1456 def push_unbundle(self, remote, force, revs):
1457 1457 # The local repo finds the heads on the server, then figures out
1458 1458 # which revs it must push. Once the revs are transferred, if the
1459 1459 # server finds it has different heads (someone else won the
1460 1460 # commit/push race), it aborts.
1461 1461
1462 1462 ret = self.prepush(remote, force, revs)
1463 1463 if ret[0] is not None:
1464 1464 cg, remote_heads = ret
1465 1465 if force: remote_heads = ['force']
1466 1466 return remote.unbundle(cg, remote_heads, 'push')
1467 1467 return ret[1]
1468 1468
1469 1469 def changegroupinfo(self, nodes):
1470 1470 self.ui.note(_("%d changesets found\n") % len(nodes))
1471 1471 if self.ui.debugflag:
1472 1472 self.ui.debug(_("List of changesets:\n"))
1473 1473 for node in nodes:
1474 1474 self.ui.debug("%s\n" % hex(node))
1475 1475
1476 1476 def changegroupsubset(self, bases, heads, source):
1477 1477 """This function generates a changegroup consisting of all the nodes
1478 1478 that are descendants of any of the bases, and ancestors of any of
1479 1479 the heads.
1480 1480
1481 1481 It is fairly complex as determining which filenodes and which
1482 1482 manifest nodes need to be included for the changeset to be complete
1483 1483 is non-trivial.
1484 1484
1485 1485 Another wrinkle is doing the reverse, figuring out which changeset in
1486 1486 the changegroup a particular filenode or manifestnode belongs to."""
1487 1487
1488 1488 self.hook('preoutgoing', throw=True, source=source)
1489 1489
1490 1490 # Set up some initial variables
1491 1491 # Make it easy to refer to self.changelog
1492 1492 cl = self.changelog
1493 1493 # msng is short for missing - compute the list of changesets in this
1494 1494 # changegroup.
1495 1495 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1496 1496 self.changegroupinfo(msng_cl_lst)
1497 1497 # Some bases may turn out to be superfluous, and some heads may be
1498 1498 # too. nodesbetween will return the minimal set of bases and heads
1499 1499 # necessary to re-create the changegroup.
1500 1500
1501 1501 # Known heads are the list of heads that it is assumed the recipient
1502 1502 # of this changegroup will know about.
1503 1503 knownheads = {}
1504 1504 # We assume that all parents of bases are known heads.
1505 1505 for n in bases:
1506 1506 for p in cl.parents(n):
1507 1507 if p != nullid:
1508 1508 knownheads[p] = 1
1509 1509 knownheads = knownheads.keys()
1510 1510 if knownheads:
1511 1511 # Now that we know what heads are known, we can compute which
1512 1512 # changesets are known. The recipient must know about all
1513 1513 # changesets required to reach the known heads from the null
1514 1514 # changeset.
1515 1515 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1516 1516 junk = None
1517 1517 # Transform the list into an ersatz set.
1518 1518 has_cl_set = dict.fromkeys(has_cl_set)
1519 1519 else:
1520 1520 # If there were no known heads, the recipient cannot be assumed to
1521 1521 # know about any changesets.
1522 1522 has_cl_set = {}
1523 1523
1524 1524 # Make it easy to refer to self.manifest
1525 1525 mnfst = self.manifest
1526 1526 # We don't know which manifests are missing yet
1527 1527 msng_mnfst_set = {}
1528 1528 # Nor do we know which filenodes are missing.
1529 1529 msng_filenode_set = {}
1530 1530
1531 1531 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1532 1532 junk = None
1533 1533
1534 1534 # A changeset always belongs to itself, so the changenode lookup
1535 1535 # function for a changenode is identity.
1536 1536 def identity(x):
1537 1537 return x
1538 1538
1539 1539 # A function generating function. Sets up an environment for the
1540 1540 # inner function.
1541 1541 def cmp_by_rev_func(revlog):
1542 1542 # Compare two nodes by their revision number in the environment's
1543 1543 # revision history. Since the revision number both represents the
1544 1544 # most efficient order to read the nodes in, and represents a
1545 1545 # topological sorting of the nodes, this function is often useful.
1546 1546 def cmp_by_rev(a, b):
1547 1547 return cmp(revlog.rev(a), revlog.rev(b))
1548 1548 return cmp_by_rev
1549 1549
1550 1550 # If we determine that a particular file or manifest node must be a
1551 1551 # node that the recipient of the changegroup will already have, we can
1552 1552 # also assume the recipient will have all the parents. This function
1553 1553 # prunes them from the set of missing nodes.
1554 1554 def prune_parents(revlog, hasset, msngset):
1555 1555 haslst = hasset.keys()
1556 1556 haslst.sort(cmp_by_rev_func(revlog))
1557 1557 for node in haslst:
1558 1558 parentlst = [p for p in revlog.parents(node) if p != nullid]
1559 1559 while parentlst:
1560 1560 n = parentlst.pop()
1561 1561 if n not in hasset:
1562 1562 hasset[n] = 1
1563 1563 p = [p for p in revlog.parents(n) if p != nullid]
1564 1564 parentlst.extend(p)
1565 1565 for n in hasset:
1566 1566 msngset.pop(n, None)
1567 1567
1568 1568 # This is a function generating function used to set up an environment
1569 1569 # for the inner function to execute in.
1570 1570 def manifest_and_file_collector(changedfileset):
1571 1571 # This is an information gathering function that gathers
1572 1572 # information from each changeset node that goes out as part of
1573 1573 # the changegroup. The information gathered is a list of which
1574 1574 # manifest nodes are potentially required (the recipient may
1575 1575 # already have them) and total list of all files which were
1576 1576 # changed in any changeset in the changegroup.
1577 1577 #
1578 1578 # For each manifest, we also remember the first changenode we
1579 1579 # saw that referenced it, so we can later determine which
1580 1580 # changenode 'owns' the manifest.
1581 1581 def collect_manifests_and_files(clnode):
1582 1582 c = cl.read(clnode)
1583 1583 for f in c[3]:
1584 1584 # This is to make sure we keep only a single string instance
1585 1585 # per filename.
1586 1586 changedfileset.setdefault(f, f)
1587 1587 msng_mnfst_set.setdefault(c[0], clnode)
1588 1588 return collect_manifests_and_files
1589 1589
1590 1590 # Figure out which manifest nodes (of the ones we think might be part
1591 1591 # of the changegroup) the recipient must know about and remove them
1592 1592 # from the changegroup.
1593 1593 def prune_manifests():
1594 1594 has_mnfst_set = {}
1595 1595 for n in msng_mnfst_set:
1596 1596 # If a 'missing' manifest thinks it belongs to a changenode
1597 1597 # the recipient is assumed to have, obviously the recipient
1598 1598 # must have that manifest.
1599 1599 linknode = cl.node(mnfst.linkrev(n))
1600 1600 if linknode in has_cl_set:
1601 1601 has_mnfst_set[n] = 1
1602 1602 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1603 1603
1604 1604 # Use the information collected in collect_manifests_and_files to say
1605 1605 # which changenode any manifestnode belongs to.
1606 1606 def lookup_manifest_link(mnfstnode):
1607 1607 return msng_mnfst_set[mnfstnode]
1608 1608
1609 1609 # A function generating function that sets up the initial environment
1610 1610 # for the inner function.
1611 1611 def filenode_collector(changedfiles):
1612 1612 next_rev = [0]
1613 1613 # This gathers information from each manifestnode included in the
1614 1614 # changegroup about which filenodes the manifest node references
1615 1615 # so we can include those in the changegroup too.
1616 1616 #
1617 1617 # It also remembers which changenode each filenode belongs to. It
1618 1618 # does this by assuming that a filenode belongs to the changenode
1619 1619 # that the first manifest referencing it belongs to.
1620 1620 def collect_msng_filenodes(mnfstnode):
1621 1621 r = mnfst.rev(mnfstnode)
1622 1622 if r == next_rev[0]:
1623 1623 # If this rev directly follows the last one we read,
1624 1624 # reading just the delta is enough.
1625 1625 deltamf = mnfst.readdelta(mnfstnode)
1626 1626 # For each line in the delta
1627 1627 for f, fnode in deltamf.items():
1628 1628 f = changedfiles.get(f, None)
1629 1629 # And if the file is in the list of files we care
1630 1630 # about.
1631 1631 if f is not None:
1632 1632 # Get the changenode this manifest belongs to
1633 1633 clnode = msng_mnfst_set[mnfstnode]
1634 1634 # Create the set of filenodes for the file if
1635 1635 # there isn't one already.
1636 1636 ndset = msng_filenode_set.setdefault(f, {})
1637 1637 # And set the filenode's changelog node to the
1638 1638 # manifest's if it hasn't been set already.
1639 1639 ndset.setdefault(fnode, clnode)
1640 1640 else:
1641 1641 # Otherwise we need a full manifest.
1642 1642 m = mnfst.read(mnfstnode)
1643 1643 # For every file we care about.
1644 1644 for f in changedfiles:
1645 1645 fnode = m.get(f, None)
1646 1646 # If it's in the manifest
1647 1647 if fnode is not None:
1648 1648 # See comments above.
1649 1649 clnode = msng_mnfst_set[mnfstnode]
1650 1650 ndset = msng_filenode_set.setdefault(f, {})
1651 1651 ndset.setdefault(fnode, clnode)
1652 1652 # Remember the revision we hope to see next.
1653 1653 next_rev[0] = r + 1
1654 1654 return collect_msng_filenodes
1655 1655
1656 1656 # We have a list of filenodes we think we need for a file; let's
1657 1657 # remove all those we know the recipient must have.
1658 1658 def prune_filenodes(f, filerevlog):
1659 1659 msngset = msng_filenode_set[f]
1660 1660 hasset = {}
1661 1661 # If a 'missing' filenode thinks it belongs to a changenode we
1662 1662 # assume the recipient must have, then the recipient must have
1663 1663 # that filenode.
1664 1664 for n in msngset:
1665 1665 clnode = cl.node(filerevlog.linkrev(n))
1666 1666 if clnode in has_cl_set:
1667 1667 hasset[n] = 1
1668 1668 prune_parents(filerevlog, hasset, msngset)
1669 1669
1670 1670 # A function generating function that sets up a context for the
1671 1671 # inner function.
1672 1672 def lookup_filenode_link_func(fname):
1673 1673 msngset = msng_filenode_set[fname]
1674 1674 # Lookup the changenode the filenode belongs to.
1675 1675 def lookup_filenode_link(fnode):
1676 1676 return msngset[fnode]
1677 1677 return lookup_filenode_link
1678 1678
1679 1679 # Now that we have all these utility functions to help out and
1680 1680 # logically divide up the task, generate the group.
1681 1681 def gengroup():
1682 1682 # The set of changed files starts empty.
1683 1683 changedfiles = {}
1684 1684 # Create a changenode group generator that will call our functions
1685 1685 # back to lookup the owning changenode and collect information.
1686 1686 group = cl.group(msng_cl_lst, identity,
1687 1687 manifest_and_file_collector(changedfiles))
1688 1688 for chnk in group:
1689 1689 yield chnk
1690 1690
1691 1691 # The list of manifests has been collected by the generator
1692 1692 # calling our functions back.
1693 1693 prune_manifests()
1694 1694 msng_mnfst_lst = msng_mnfst_set.keys()
1695 1695 # Sort the manifestnodes by revision number.
1696 1696 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1697 1697 # Create a generator for the manifestnodes that calls our lookup
1698 1698 # and data collection functions back.
1699 1699 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1700 1700 filenode_collector(changedfiles))
1701 1701 for chnk in group:
1702 1702 yield chnk
1703 1703
1704 1704 # These are no longer needed, dereference and toss the memory for
1705 1705 # them.
1706 1706 msng_mnfst_lst = None
1707 1707 msng_mnfst_set.clear()
1708 1708
1709 1709 changedfiles = changedfiles.keys()
1710 1710 changedfiles.sort()
1711 1711 # Go through all our files in order sorted by name.
1712 1712 for fname in changedfiles:
1713 1713 filerevlog = self.file(fname)
1714 1714 # Toss out the filenodes that the recipient isn't really
1715 1715 # missing.
1716 1716 if fname in msng_filenode_set:
1717 1717 prune_filenodes(fname, filerevlog)
1718 1718 msng_filenode_lst = msng_filenode_set[fname].keys()
1719 1719 else:
1720 1720 msng_filenode_lst = []
1721 1721 # If any filenodes are left, generate the group for them,
1722 1722 # otherwise don't bother.
1723 1723 if len(msng_filenode_lst) > 0:
1724 1724 yield changegroup.genchunk(fname)
1725 1725 # Sort the filenodes by their revision number.
1726 1726 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1727 1727 # Create a group generator and only pass in a changenode
1728 1728 # lookup function as we need to collect no information
1729 1729 # from filenodes.
1730 1730 group = filerevlog.group(msng_filenode_lst,
1731 1731 lookup_filenode_link_func(fname))
1732 1732 for chnk in group:
1733 1733 yield chnk
1734 1734 if fname in msng_filenode_set:
1735 1735 # Don't need this anymore, toss it to free memory.
1736 1736 del msng_filenode_set[fname]
1737 1737 # Signal that no more groups are left.
1738 1738 yield changegroup.closechunk()
1739 1739
1740 1740 if msng_cl_lst:
1741 1741 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1742 1742
1743 1743 return util.chunkbuffer(gengroup())
1744 1744
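The prune_parents helper above relies on a simple invariant: if the recipient has a node, it necessarily has every ancestor of that node, so all of those ancestors can be dropped from the missing set too. A small self-contained sketch of the same pruning over plain sets (toy data; the real code works on revlog nodes):

def prune_known(parents, hasset, missing):
    """Anything the recipient is known to have implies it also has
    every ancestor, so drop those ancestors from the missing set."""
    has = set(hasset)
    stack = list(hasset)
    while stack:
        for p in parents[stack.pop()]:
            if p not in has:
                has.add(p)
                stack.append(p)
    for n in has:
        missing.discard(n)

parents = {0: [], 1: [0], 2: [1], 3: [2]}
missing = set([0, 1, 2, 3])
prune_known(parents, [2], missing)   # recipient has rev 2
assert missing == set([3])
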
1745 1745 def changegroup(self, basenodes, source):
1746 1746 """Generate a changegroup of all nodes that we have that a recipient
1747 1747 doesn't.
1748 1748
1749 1749 This is much easier than the previous function as we can assume that
1750 1750 the recipient has any changenode we aren't sending them."""
1751 1751
1752 1752 self.hook('preoutgoing', throw=True, source=source)
1753 1753
1754 1754 cl = self.changelog
1755 1755 nodes = cl.nodesbetween(basenodes, None)[0]
1756 1756 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1757 1757 self.changegroupinfo(nodes)
1758 1758
1759 1759 def identity(x):
1760 1760 return x
1761 1761
1762 1762 def gennodelst(revlog):
1763 1763 for r in xrange(0, revlog.count()):
1764 1764 n = revlog.node(r)
1765 1765 if revlog.linkrev(n) in revset:
1766 1766 yield n
1767 1767
1768 1768 def changed_file_collector(changedfileset):
1769 1769 def collect_changed_files(clnode):
1770 1770 c = cl.read(clnode)
1771 1771 for fname in c[3]:
1772 1772 changedfileset[fname] = 1
1773 1773 return collect_changed_files
1774 1774
1775 1775 def lookuprevlink_func(revlog):
1776 1776 def lookuprevlink(n):
1777 1777 return cl.node(revlog.linkrev(n))
1778 1778 return lookuprevlink
1779 1779
1780 1780 def gengroup():
1781 1781 # construct a list of all changed files
1782 1782 changedfiles = {}
1783 1783
1784 1784 for chnk in cl.group(nodes, identity,
1785 1785 changed_file_collector(changedfiles)):
1786 1786 yield chnk
1787 1787 changedfiles = changedfiles.keys()
1788 1788 changedfiles.sort()
1789 1789
1790 1790 mnfst = self.manifest
1791 1791 nodeiter = gennodelst(mnfst)
1792 1792 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1793 1793 yield chnk
1794 1794
1795 1795 for fname in changedfiles:
1796 1796 filerevlog = self.file(fname)
1797 1797 nodeiter = gennodelst(filerevlog)
1798 1798 nodeiter = list(nodeiter)
1799 1799 if nodeiter:
1800 1800 yield changegroup.genchunk(fname)
1801 1801 lookup = lookuprevlink_func(filerevlog)
1802 1802 for chnk in filerevlog.group(nodeiter, lookup):
1803 1803 yield chnk
1804 1804
1805 1805 yield changegroup.closechunk()
1806 1806
1807 1807 if nodes:
1808 1808 self.hook('outgoing', node=hex(nodes[0]), source=source)
1809 1809
1810 1810 return util.chunkbuffer(gengroup())
1811 1811
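Because the recipient is assumed to have every changeset we are not sending, gennodelst can select revlog entries purely by linkrev: an entry goes out exactly when the changeset that introduced it is outgoing. A toy sketch of that filter (plain indices stand in for nodes and revlogs):

def nodes_for_changesets(linkrevs, revset):
    """Toy gennodelst: yield the revlog entries (indices here)
    whose linkrev points into the outgoing changeset set."""
    for r, lr in enumerate(linkrevs):
        if lr in revset:
            yield r

# filelog revs 0..3 were introduced by changelog revs 0, 0, 2, 5;
# if changesets 2 and 5 are outgoing, filelog revs 2 and 3 go out:
assert list(nodes_for_changesets([0, 0, 2, 5], set([2, 5]))) == [2, 3]
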
1812 1812 def addchangegroup(self, source, srctype, url):
1813 1813 """add changegroup to repo.
1814 1814
1815 1815 return values:
1816 1816 - nothing changed or no source: 0
1817 1817 - more heads than before: 1+added heads (2..n)
1818 1818 - fewer heads than before: -1-removed heads (-2..-n)
1819 1819 - number of heads stays the same: 1
1820 1820 """
1821 1821 def csmap(x):
1822 1822 self.ui.debug(_("add changeset %s\n") % short(x))
1823 1823 return cl.count()
1824 1824
1825 1825 def revmap(x):
1826 1826 return cl.rev(x)
1827 1827
1828 1828 if not source:
1829 1829 return 0
1830 1830
1831 1831 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1832 1832
1833 1833 changesets = files = revisions = 0
1834 1834
1835 1835 # write changelog data to temp files so concurrent readers will not see
1836 1836 # an inconsistent view
1837 1837 cl = self.changelog
1838 1838 cl.delayupdate()
1839 1839 oldheads = len(cl.heads())
1840 1840
1841 1841 tr = self.transaction()
1842 1842 try:
1843 1843 trp = weakref.proxy(tr)
1844 1844 # pull off the changeset group
1845 1845 self.ui.status(_("adding changesets\n"))
1846 1846 cor = cl.count() - 1
1847 1847 chunkiter = changegroup.chunkiter(source)
1848 1848 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1849 1849 raise util.Abort(_("received changelog group is empty"))
1850 1850 cnr = cl.count() - 1
1851 1851 changesets = cnr - cor
1852 1852
1853 1853 # pull off the manifest group
1854 1854 self.ui.status(_("adding manifests\n"))
1855 1855 chunkiter = changegroup.chunkiter(source)
1856 1856 # no need to check for empty manifest group here:
1857 1857 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1858 1858 # no new manifest will be created and the manifest group will
1859 1859 # be empty during the pull
1860 1860 self.manifest.addgroup(chunkiter, revmap, trp)
1861 1861
1862 1862 # process the files
1863 1863 self.ui.status(_("adding file changes\n"))
1864 1864 while 1:
1865 1865 f = changegroup.getchunk(source)
1866 1866 if not f:
1867 1867 break
1868 1868 self.ui.debug(_("adding %s revisions\n") % f)
1869 1869 fl = self.file(f)
1870 1870 o = fl.count()
1871 1871 chunkiter = changegroup.chunkiter(source)
1872 1872 if fl.addgroup(chunkiter, revmap, trp) is None:
1873 1873 raise util.Abort(_("received file revlog group is empty"))
1874 1874 revisions += fl.count() - o
1875 1875 files += 1
1876 1876
1877 1877 # make changelog see real files again
1878 1878 cl.finalize(trp)
1879 1879
1880 1880 newheads = len(self.changelog.heads())
1881 1881 heads = ""
1882 1882 if oldheads and newheads != oldheads:
1883 1883 heads = _(" (%+d heads)") % (newheads - oldheads)
1884 1884
1885 1885 self.ui.status(_("added %d changesets"
1886 1886 " with %d changes to %d files%s\n")
1887 1887 % (changesets, revisions, files, heads))
1888 1888
1889 1889 if changesets > 0:
1890 1890 self.hook('pretxnchangegroup', throw=True,
1891 1891 node=hex(self.changelog.node(cor+1)), source=srctype,
1892 1892 url=url)
1893 1893
1894 1894 tr.close()
1895 1895 finally:
1896 1896 del tr
1897 1897
1898 1898 if changesets > 0:
1899 1899 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1900 1900 source=srctype, url=url)
1901 1901
1902 1902 for i in xrange(cor + 1, cnr + 1):
1903 1903 self.hook("incoming", node=hex(self.changelog.node(i)),
1904 1904 source=srctype, url=url)
1905 1905
1906 1906 # never return 0 here:
1907 1907 if newheads < oldheads:
1908 1908 return newheads - oldheads - 1
1909 1909 else:
1910 1910 return newheads - oldheads + 1
1911 1911
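The return-value encoding documented in the docstring above never yields 0 once something was added: growth (or no change) in the head count maps to positive values, shrinkage to negative ones. A tiny sketch of just that arithmetic:

def heads_delta_code(oldheads, newheads):
    """Toy version of addchangegroup's return value: never 0 when
    something was added; positive when heads grew or stayed equal,
    negative when heads shrank."""
    if newheads < oldheads:
        return newheads - oldheads - 1
    return newheads - oldheads + 1

assert heads_delta_code(1, 3) == 3    # two new heads -> 1 + added
assert heads_delta_code(3, 1) == -3   # two heads merged away
assert heads_delta_code(2, 2) == 1    # head count unchanged
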
1912 1912
1913 1913 def stream_in(self, remote):
1914 1914 fp = remote.stream_out()
1915 1915 l = fp.readline()
1916 1916 try:
1917 1917 resp = int(l)
1918 1918 except ValueError:
1919 1919 raise util.UnexpectedOutput(
1920 1920 _('Unexpected response from remote server:'), l)
1921 1921 if resp == 1:
1922 1922 raise util.Abort(_('operation forbidden by server'))
1923 1923 elif resp == 2:
1924 1924 raise util.Abort(_('locking the remote repository failed'))
1925 1925 elif resp != 0:
1926 1926 raise util.Abort(_('the server sent an unknown error code'))
1927 1927 self.ui.status(_('streaming all changes\n'))
1928 1928 l = fp.readline()
1929 1929 try:
1930 1930 total_files, total_bytes = map(int, l.split(' ', 1))
1931 1931 except (ValueError, TypeError):
1932 1932 raise util.UnexpectedOutput(
1933 1933 _('Unexpected response from remote server:'), l)
1934 1934 self.ui.status(_('%d files to transfer, %s of data\n') %
1935 1935 (total_files, util.bytecount(total_bytes)))
1936 1936 start = time.time()
1937 1937 for i in xrange(total_files):
1938 1938 # XXX doesn't support '\n' or '\r' in filenames
1939 1939 l = fp.readline()
1940 1940 try:
1941 1941 name, size = l.split('\0', 1)
1942 1942 size = int(size)
1943 1943 except (ValueError, TypeError):
1944 1944 raise util.UnexpectedOutput(
1945 1945 _('Unexpected response from remote server:'), l)
1946 1946 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1947 1947 ofp = self.sopener(name, 'w')
1948 1948 for chunk in util.filechunkiter(fp, limit=size):
1949 1949 ofp.write(chunk)
1950 1950 ofp.close()
1951 1951 elapsed = time.time() - start
1952 1952 if elapsed <= 0:
1953 1953 elapsed = 0.001
1954 1954 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1955 1955 (util.bytecount(total_bytes), elapsed,
1956 1956 util.bytecount(total_bytes / elapsed)))
1957 1957 self.invalidate()
1958 1958 return len(self.heads()) + 1
1959 1959
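Putting stream_in's reads together, the wire format is: a status line, a 'total_files total_bytes' line, then for each file a 'name\0size' line followed by exactly size bytes of data. A minimal stand-alone parser for that framing (toy data; in the real code each file lands in the store via sopener rather than in a dict):

import io

def parse_stream(fp):
    """Parse the stream_out framing: status line, then
    'total_files total_bytes', then per file a 'name\\0size'
    line followed by size raw bytes."""
    if int(fp.readline().decode('ascii')) != 0:
        raise ValueError('server refused stream')
    header = fp.readline().decode('ascii')
    total_files, total_bytes = map(int, header.split(' ', 1))
    files = {}
    for _ in range(total_files):
        line = fp.readline().decode('ascii').rstrip('\n')
        name, size = line.split('\0', 1)
        files[name] = fp.read(int(size))
    return files

wire = b'0\n2 11\n00changelog.i\x005\nHELLOdata/f.i\x006\nWORLD!'
assert parse_stream(io.BytesIO(wire)) == {'00changelog.i': b'HELLO',
                                          'data/f.i': b'WORLD!'}
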
1960 1960 def clone(self, remote, heads=[], stream=False):
1961 1961 '''clone remote repository.
1962 1962
1963 1963 keyword arguments:
1964 1964 heads: list of revs to clone (forces use of pull)
1965 1965 stream: use streaming clone if possible'''
1966 1966
1967 1967 # now, all clients that can request uncompressed clones can
1968 1968 # read repo formats supported by all servers that can serve
1969 1969 # them.
1970 1970
1971 1971 # if revlog format changes, client will have to check version
1972 1972 # and format flags on "stream" capability, and use
1973 1973 # uncompressed only if compatible.
1974 1974
1975 1975 if stream and not heads and remote.capable('stream'):
1976 1976 return self.stream_in(remote)
1977 1977 return self.pull(remote, heads)
1978 1978
1979 1979 # used to avoid circular references so destructors work
1980 1980 def aftertrans(files):
1981 1981 renamefiles = [tuple(t) for t in files]
1982 1982 def a():
1983 1983 for src, dest in renamefiles:
1984 1984 util.rename(src, dest)
1985 1985 return a
1986 1986
1987 1987 def instance(ui, path, create):
1988 1988 return localrepository(ui, util.drop_scheme('file', path), create)
1989 1989
1990 1990 def islocal(path):
1991 1991 return True
@@ -1,225 +1,225 b''
1 1 # sshrepo.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from remoterepo import *
10 10 from i18n import _
11 11 import repo, os, re, stat, util
12 12
13 13 class sshrepository(remoterepository):
14 14 def __init__(self, ui, path, create=0):
15 15 self._url = path
16 16 self.ui = ui
17 17
18 18 m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
19 19 if not m:
20 20 self.raise_(repo.RepoError(_("couldn't parse location %s") % path))
21 21
22 22 self.user = m.group(2)
23 23 self.host = m.group(3)
24 24 self.port = m.group(5)
25 25 self.path = m.group(7) or "."
26 26
27 27 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
28 28 args = self.port and ("%s -p %s") % (args, self.port) or args
29 29
30 30 sshcmd = self.ui.config("ui", "ssh", "ssh")
31 31 remotecmd = self.ui.config("ui", "remotecmd", "hg")
32 32
33 33 if create:
34 34 cmd = '%s %s "%s init %s"'
35 35 cmd = cmd % (sshcmd, args, remotecmd, self.path)
36 36
37 37 ui.note('running %s\n' % cmd)
38 38 res = os.system(cmd)
39 39 if res != 0:
40 40 self.raise_(repo.RepoError(_("could not create remote repo")))
41 41
42 42 self.validate_repo(ui, sshcmd, args, remotecmd)
43 43
44 44 def url(self):
45 45 return self._url
46 46
47 47 def validate_repo(self, ui, sshcmd, args, remotecmd):
48 48 # clean up previous run
49 49 self.cleanup()
50 50
51 51 cmd = '%s %s "%s -R %s serve --stdio"'
52 52 cmd = cmd % (sshcmd, args, remotecmd, self.path)
53 53
54 54 ui.note('running %s\n' % cmd)
55 55 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
56 56
57 57 # skip any noise generated by remote shell
58 58 self.do_cmd("hello")
59 59 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
60 60 lines = ["", "dummy"]
61 61 max_noise = 500
62 62 while lines[-1] and max_noise:
63 63 l = r.readline()
64 64 self.readerr()
65 65 if lines[-1] == "1\n" and l == "\n":
66 66 break
67 67 if l:
68 68 ui.debug(_("remote: "), l)
69 69 lines.append(l)
70 70 max_noise -= 1
71 71 else:
72 72 self.raise_(repo.RepoError(_("no suitable response from remote hg")))
73 73
74 self.capabilities = ()
74 self.capabilities = util.set()
75 75 lines.reverse()
76 76 for l in lines:
77 77 if l.startswith("capabilities:"):
78 self.capabilities = l[:-1].split(":")[1].split()
78 self.capabilities.update(l[:-1].split(":")[1].split())
79 79 break
80 80
81 81 def readerr(self):
82 82 while 1:
83 83 size = util.fstat(self.pipee).st_size
84 84 if size == 0: break
85 85 l = self.pipee.readline()
86 86 if not l: break
87 87 self.ui.status(_("remote: "), l)
88 88
89 89 def raise_(self, exception):
90 90 self.cleanup()
91 91 raise exception
92 92
93 93 def cleanup(self):
94 94 try:
95 95 self.pipeo.close()
96 96 self.pipei.close()
97 97 # read the error descriptor until EOF
98 98 for l in self.pipee:
99 99 self.ui.status(_("remote: "), l)
100 100 self.pipee.close()
101 101 except:
102 102 pass
103 103
104 104 __del__ = cleanup
105 105
106 106 def do_cmd(self, cmd, **args):
107 107 self.ui.debug(_("sending %s command\n") % cmd)
108 108 self.pipeo.write("%s\n" % cmd)
109 109 for k, v in args.items():
110 110 self.pipeo.write("%s %d\n" % (k, len(v)))
111 111 self.pipeo.write(v)
112 112 self.pipeo.flush()
113 113
114 114 return self.pipei
115 115
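do_cmd's wire format is simply the command name on its own line, then each argument as a 'key length' line followed by the raw value bytes. A toy encoder for the same framing (arguments are sorted here for determinism; the real code writes them in dict order):

def encode_cmd(cmd, **args):
    """Toy encoder for the ssh framing do_cmd writes: the command
    name on its own line, then 'key length' plus raw value bytes
    for each argument."""
    out = [cmd + '\n']
    for k, v in sorted(args.items()):
        out.append('%s %d\n%s' % (k, len(v), v))
    return ''.join(out)

assert encode_cmd('between', pairs='a-b') == 'between\npairs 3\na-b'
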
116 116 def call(self, cmd, **args):
117 117 r = self.do_cmd(cmd, **args)
118 118 l = r.readline()
119 119 self.readerr()
120 120 try:
121 121 l = int(l)
122 122 except:
123 123 self.raise_(util.UnexpectedOutput(_("unexpected response:"), l))
124 124 return r.read(l)
125 125
126 126 def lock(self):
127 127 self.call("lock")
128 128 return remotelock(self)
129 129
130 130 def unlock(self):
131 131 self.call("unlock")
132 132
133 133 def lookup(self, key):
134 134 d = self.call("lookup", key=key)
135 135 success, data = d[:-1].split(" ", 1)
136 136 if int(success):
137 137 return bin(data)
138 138 else:
139 139 self.raise_(repo.RepoError(data))
140 140
141 141 def heads(self):
142 142 d = self.call("heads")
143 143 try:
144 144 return map(bin, d[:-1].split(" "))
145 145 except:
146 146 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
147 147
148 148 def branches(self, nodes):
149 149 n = " ".join(map(hex, nodes))
150 150 d = self.call("branches", nodes=n)
151 151 try:
152 152 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
153 153 return br
154 154 except:
155 155 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
156 156
157 157 def between(self, pairs):
158 158 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
159 159 d = self.call("between", pairs=n)
160 160 try:
161 161 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
162 162 return p
163 163 except:
164 164 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
165 165
166 166 def changegroup(self, nodes, kind):
167 167 n = " ".join(map(hex, nodes))
168 168 return self.do_cmd("changegroup", roots=n)
169 169
170 170 def changegroupsubset(self, bases, heads, kind):
171 171 bases = " ".join(map(hex, bases))
172 172 heads = " ".join(map(hex, heads))
173 173 return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
174 174
175 175 def unbundle(self, cg, heads, source):
176 176 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
177 177 if d:
178 178 # remote may send "unsynced changes"
179 179 self.raise_(repo.RepoError(_("push refused: %s") % d))
180 180
181 181 while 1:
182 182 d = cg.read(4096)
183 183 if not d: break
184 184 self.pipeo.write(str(len(d)) + '\n')
185 185 self.pipeo.write(d)
186 186 self.readerr()
187 187
188 188 self.pipeo.write('0\n')
189 189 self.pipeo.flush()
190 190
191 191 self.readerr()
192 192 l = int(self.pipei.readline())
193 193 r = self.pipei.read(l)
194 194 if r:
195 195 # remote may send "unsynced changes"
196 196 self.raise_(repo.RepoError(_("push failed: %s") % r))
197 197
198 198 self.readerr()
199 199 l = int(self.pipei.readline())
200 200 r = self.pipei.read(l)
201 201 return int(r)
202 202
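unbundle streams the bundle as length-prefixed blocks: each write is preceded by its decimal size on its own line, and a '0' line marks the end of the stream. A minimal sketch of that framing (toy data):

def frame_bundle(data, blocksize=4096):
    """Toy version of the framing unbundle uses to ship a bundle
    over ssh: each block is preceded by its decimal length on its
    own line, and a '0' line terminates the stream."""
    out = []
    for i in range(0, len(data), blocksize):
        block = data[i:i + blocksize]
        out.append('%d\n%s' % (len(block), block))
    out.append('0\n')
    return ''.join(out)

assert frame_bundle('abcdefgh', blocksize=5) == '5\nabcde3\nfgh0\n'
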
203 203 def addchangegroup(self, cg, source, url):
204 204 d = self.call("addchangegroup")
205 205 if d:
206 206 self.raise_(repo.RepoError(_("push refused: %s") % d))
207 207 while 1:
208 208 d = cg.read(4096)
209 209 if not d: break
210 210 self.pipeo.write(d)
211 211 self.readerr()
212 212
213 213 self.pipeo.flush()
214 214
215 215 self.readerr()
216 216 l = int(self.pipei.readline())
217 217 r = self.pipei.read(l)
218 218 if not r:
219 219 return 1
220 220 return int(r)
221 221
222 222 def stream_out(self):
223 223 return self.do_cmd('stream_out')
224 224
225 225 instance = sshrepository