Turn capabilities into a mutable set, instead of a fixed tuple.
Bryan O'Sullivan
r5258:b534c502 default
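This changeset replaces the fixed capabilities tuples in httprepo.py and localrepo.py with mutable sets. A tuple fixed at class-definition time cannot be cheaply amended by subclasses or extensions, and membership tests scan it linearly; a set gives O(1) lookups and in-place mutation. Below is a minimal sketch of the idea, not Mercurial's code: the baserepo/extendedrepo classes are hypothetical, and the try/except shim mirrors what util.set provided at the time (the set builtin, with a sets.Set fallback for Python 2.3).

# Hypothetical illustration of tuple -> set capabilities; not Mercurial code.
try:
    set                              # builtin on Python >= 2.4
except NameError:
    from sets import Set as set     # Python 2.3 fallback, as util.set did

class baserepo(object):
    capabilities = set(('lookup', 'changegroupsubset'))

class extendedrepo(baserepo):
    # copy before mutating so the base class's set is not shared
    capabilities = set(baserepo.capabilities)
    capabilities.add('unbundle')

assert 'lookup' in extendedrepo.capabilities
assert 'unbundle' not in baserepo.capabilities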
--- a/mercurial/httprepo.py
+++ b/mercurial/httprepo.py
@@ -1,454 +1,454 @@
# httprepo.py - HTTP repository proxy classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from remoterepo import *
from i18n import _
import repo, os, urllib, urllib2, urlparse, zlib, util, httplib
import errno, keepalive, tempfile, socket, changegroup

class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
    def __init__(self, ui):
        urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
        self.ui = ui

    def find_user_password(self, realm, authuri):
        authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
            self, realm, authuri)
        user, passwd = authinfo
        if user and passwd:
            return (user, passwd)

        if not self.ui.interactive:
            raise util.Abort(_('http authorization required'))

        self.ui.write(_("http authorization required\n"))
        self.ui.status(_("realm: %s\n") % realm)
        if user:
            self.ui.status(_("user: %s\n") % user)
        else:
            user = self.ui.prompt(_("user:"), default=None)

        if not passwd:
            passwd = self.ui.getpass()

        self.add_password(realm, authuri, user, passwd)
        return (user, passwd)

def netlocsplit(netloc):
    '''split [user[:passwd]@]host[:port] into 4-tuple.'''

    a = netloc.find('@')
    if a == -1:
        user, passwd = None, None
    else:
        userpass, netloc = netloc[:a], netloc[a+1:]
        c = userpass.find(':')
        if c == -1:
            user, passwd = urllib.unquote(userpass), None
        else:
            user = urllib.unquote(userpass[:c])
            passwd = urllib.unquote(userpass[c+1:])
    c = netloc.find(':')
    if c == -1:
        host, port = netloc, None
    else:
        host, port = netloc[:c], netloc[c+1:]
    return host, port, user, passwd

def netlocunsplit(host, port, user=None, passwd=None):
    '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
    if port:
        hostport = host + ':' + port
    else:
        hostport = host
    if user:
        if passwd:
            userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
        else:
            userpass = urllib.quote(user)
        return userpass + '@' + hostport
    return hostport

# work around a bug in Python < 2.4.2
# (it leaves a "\n" at the end of Proxy-authorization headers)
class request(urllib2.Request):
    def add_header(self, key, val):
        if key.lower() == 'proxy-authorization':
            val = val.strip()
        return urllib2.Request.add_header(self, key, val)

class httpsendfile(file):
    def __len__(self):
        return os.fstat(self.fileno()).st_size

def _gen_sendfile(connection):
    def _sendfile(self, data):
        # send a file
        if isinstance(data, httpsendfile):
            # if auth required, some data sent twice, so rewind here
            data.seek(0)
            for chunk in util.filechunkiter(data):
                connection.send(self, chunk)
        else:
            connection.send(self, data)
    return _sendfile

class httpconnection(keepalive.HTTPConnection):
    # must be able to send big bundle as stream.
    send = _gen_sendfile(keepalive.HTTPConnection)

class basehttphandler(keepalive.HTTPHandler):
    def http_open(self, req):
        return self.do_open(httpconnection, req)

has_https = hasattr(urllib2, 'HTTPSHandler')
if has_https:
    class httpsconnection(httplib.HTTPSConnection):
        response_class = keepalive.HTTPResponse
        # must be able to send big bundle as stream.
        send = _gen_sendfile(httplib.HTTPSConnection)

    class httphandler(basehttphandler, urllib2.HTTPSHandler):
        def https_open(self, req):
            return self.do_open(httpsconnection, req)
else:
    class httphandler(basehttphandler):
        pass

# In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
# it doesn't know about the auth type requested. This can happen if
# somebody is using BasicAuth and types a bad password.
class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
    def http_error_auth_reqed(self, auth_header, host, req, headers):
        try:
            return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
                        self, auth_header, host, req, headers)
        except ValueError, inst:
            arg = inst.args[0]
            if arg.startswith("AbstractDigestAuthHandler doesn't know "):
                return
            raise

def zgenerator(f):
    zd = zlib.decompressobj()
    try:
        for chunk in util.filechunkiter(f):
            yield zd.decompress(chunk)
    except httplib.HTTPException, inst:
        raise IOError(None, _('connection ended unexpectedly'))
    yield zd.flush()

_safe = ('abcdefghijklmnopqrstuvwxyz'
         'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
         '0123456789' '_.-/')
_safeset = None
_hex = None
def quotepath(path):
    '''quote the path part of a URL

    This is similar to urllib.quote, but it also tries to avoid
    quoting things twice (inspired by wget):

    >>> quotepath('abc def')
    'abc%20def'
    >>> quotepath('abc%20def')
    'abc%20def'
    >>> quotepath('abc%20 def')
    'abc%20%20def'
    >>> quotepath('abc def%20')
    'abc%20def%20'
    >>> quotepath('abc def%2')
    'abc%20def%252'
    >>> quotepath('abc def%')
    'abc%20def%25'
    '''
    global _safeset, _hex
    if _safeset is None:
        _safeset = util.set(_safe)
        _hex = util.set('abcdefABCDEF0123456789')
    l = list(path)
    for i in xrange(len(l)):
        c = l[i]
        if c == '%' and i + 2 < len(l) and (l[i+1] in _hex and l[i+2] in _hex):
            pass
        elif c not in _safeset:
            l[i] = '%%%02X' % ord(c)
    return ''.join(l)

class httprepository(remoterepository):
    def __init__(self, ui, path):
        self.path = path
        self.caps = None
        self.handler = None
        scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
        if query or frag:
            raise util.Abort(_('unsupported URL component: "%s"') %
                             (query or frag))
        if not urlpath:
            urlpath = '/'
        urlpath = quotepath(urlpath)
        host, port, user, passwd = netlocsplit(netloc)

        # urllib cannot handle URLs with embedded user or passwd
        self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
                                         urlpath, '', ''))
        self.ui = ui
        self.ui.debug(_('using %s\n') % self._url)

        proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
        # XXX proxyauthinfo = None
        self.handler = httphandler()
        handlers = [self.handler]

        if proxyurl:
            # proxy can be proper url or host[:port]
            if not (proxyurl.startswith('http:') or
                    proxyurl.startswith('https:')):
                proxyurl = 'http://' + proxyurl + '/'
            snpqf = urlparse.urlsplit(proxyurl)
            proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
            hpup = netlocsplit(proxynetloc)

            proxyhost, proxyport, proxyuser, proxypasswd = hpup
            if not proxyuser:
                proxyuser = ui.config("http_proxy", "user")
                proxypasswd = ui.config("http_proxy", "passwd")

            # see if we should use a proxy for this url
            no_list = [ "localhost", "127.0.0.1" ]
            no_list.extend([p.lower() for
                            p in ui.configlist("http_proxy", "no")])
            no_list.extend([p.strip().lower() for
                            p in os.getenv("no_proxy", '').split(',')
                            if p.strip()])
            # "http_proxy.always" config is for running tests on localhost
            if (not ui.configbool("http_proxy", "always") and
                host.lower() in no_list):
                ui.debug(_('disabling proxy for %s\n') % host)
            else:
                proxyurl = urlparse.urlunsplit((
                    proxyscheme, netlocunsplit(proxyhost, proxyport,
                                               proxyuser, proxypasswd or ''),
                    proxypath, proxyquery, proxyfrag))
                handlers.append(urllib2.ProxyHandler({scheme: proxyurl}))
                ui.debug(_('proxying through http://%s:%s\n') %
                         (proxyhost, proxyport))

        # urllib2 takes proxy values from the environment and those
        # will take precedence if found, so drop them
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if os.environ.has_key(env):
                    del os.environ[env]
            except OSError:
                pass

        passmgr = passwordmgr(ui)
        if user:
            ui.debug(_('http auth: user %s, password %s\n') %
                     (user, passwd and '*' * len(passwd) or 'not set'))
            passmgr.add_password(None, self._url, user, passwd or '')

        handlers.extend((urllib2.HTTPBasicAuthHandler(passmgr),
                         httpdigestauthhandler(passmgr)))
        opener = urllib2.build_opener(*handlers)

        # 1.0 here is the _protocol_ version
        opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
        urllib2.install_opener(opener)

    def __del__(self):
        if self.handler:
            self.handler.close_all()
            self.handler = None

    def url(self):
        return self.path

    # look up capabilities only when needed

    def get_caps(self):
        if self.caps is None:
            try:
-                self.caps = self.do_read('capabilities').split()
+                self.caps = util.set(self.do_read('capabilities').split())
            except repo.RepoError:
-                self.caps = ()
+                self.caps = util.set()
            self.ui.debug(_('capabilities: %s\n') %
                          (' '.join(self.caps or ['none'])))
        return self.caps

    capabilities = property(get_caps)

    def lock(self):
        raise util.Abort(_('operation not supported over http'))

    def do_cmd(self, cmd, **args):
        data = args.pop('data', None)
        headers = args.pop('headers', {})
        self.ui.debug(_("sending %s command\n") % cmd)
        q = {"cmd": cmd}
        q.update(args)
        qs = '?%s' % urllib.urlencode(q)
        cu = "%s%s" % (self._url, qs)
        try:
            if data:
                self.ui.debug(_("sending %s bytes\n") %
                              headers.get('content-length', 'X'))
            resp = urllib2.urlopen(request(cu, data, headers))
        except urllib2.HTTPError, inst:
            if inst.code == 401:
                raise util.Abort(_('authorization failed'))
            raise
        except httplib.HTTPException, inst:
            self.ui.debug(_('http error while sending %s command\n') % cmd)
            self.ui.print_exc()
            raise IOError(None, inst)
        except IndexError:
            # this only happens with Python 2.3, later versions raise URLError
            raise util.Abort(_('http error, possibly caused by proxy setting'))
        # record the url we got redirected to
        resp_url = resp.geturl()
        if resp_url.endswith(qs):
            resp_url = resp_url[:-len(qs)]
        if self._url != resp_url:
            self.ui.status(_('real URL is %s\n') % resp_url)
            self._url = resp_url
        try:
            proto = resp.getheader('content-type')
        except AttributeError:
            proto = resp.headers['content-type']

        # accept old "text/plain" and "application/hg-changegroup" for now
        if not (proto.startswith('application/mercurial-') or
                proto.startswith('text/plain') or
                proto.startswith('application/hg-changegroup')):
            self.ui.debug(_("Requested URL: '%s'\n") % cu)
            raise repo.RepoError(_("'%s' does not appear to be an hg repository")
                                 % self._url)

        if proto.startswith('application/mercurial-'):
            try:
                version = proto.split('-', 1)[1]
                version_info = tuple([int(n) for n in version.split('.')])
            except ValueError:
                raise repo.RepoError(_("'%s' sent a broken Content-type "
                                       "header (%s)") % (self._url, proto))
            if version_info > (0, 1):
                raise repo.RepoError(_("'%s' uses newer protocol %s") %
                                     (self._url, version))

        return resp

    def do_read(self, cmd, **args):
        fp = self.do_cmd(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def lookup(self, key):
        d = self.do_cmd("lookup", key = key).read()
        success, data = d[:-1].split(' ', 1)
        if int(success):
            return bin(data)
        raise repo.RepoError(data)

    def heads(self):
        d = self.do_read("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise util.UnexpectedOutput(_("unexpected response:"), d)

    def branches(self, nodes):
        n = " ".join(map(hex, nodes))
        d = self.do_read("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise util.UnexpectedOutput(_("unexpected response:"), d)

    def between(self, pairs):
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.do_read("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise util.UnexpectedOutput(_("unexpected response:"), d)

    def changegroup(self, nodes, kind):
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        return util.chunkbuffer(zgenerator(f))

    def changegroupsubset(self, bases, heads, source):
        baselst = " ".join([hex(n) for n in bases])
        headlst = " ".join([hex(n) for n in heads])
        f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
        return util.chunkbuffer(zgenerator(f))

    def unbundle(self, cg, heads, source):
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        type = ""
        types = self.capable('unbundle')
        # servers older than d1b16a746db6 will send 'unbundle' as a
        # boolean capability
        try:
            types = types.split(',')
        except AttributeError:
            types = [""]
        if types:
            for x in types:
                if x in changegroup.bundletypes:
                    type = x
                    break

        tempname = changegroup.writebundle(cg, None, type)
        fp = httpsendfile(tempname, "rb")
        try:
            try:
                rfp = self.do_cmd(
                    'unbundle', data=fp,
                    headers={'content-type': 'application/octet-stream'},
                    heads=' '.join(map(hex, heads)))
                try:
                    ret = int(rfp.readline())
                    self.ui.write(rfp.read())
                    return ret
                finally:
                    rfp.close()
            except socket.error, err:
                if err[0] in (errno.ECONNRESET, errno.EPIPE):
                    raise util.Abort(_('push failed: %s') % err[1])
                raise util.Abort(err[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def stream_out(self):
        return self.do_cmd('stream_out')

class httpsrepository(httprepository):
    def __init__(self, ui, path):
        if not has_https:
            raise util.Abort(_('Python support for SSL and HTTPS '
                               'is not installed'))
        httprepository.__init__(self, ui, path)

def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new http repository'))
    if path.startswith('https:'):
        return httpsrepository(ui, path)
    return httprepository(ui, path)
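The unbundle() method above negotiates bundle types through capable(), inherited from the remote repository base class. A hedged reconstruction of that helper, not the verbatim method: a bare capability name is treated as boolean True, and a name=value entry returns the value, which is why unbundle() tolerates both old boolean servers and newer ones advertising 'unbundle=HG10GZ,HG10BZ,HG10UN'.

# Sketch of capability lookup against the set built by get_caps();
# the capable() name matches the base-class helper, but this body is
# a reconstruction for illustration.
def capable(caps, name):
    for cap in caps:
        if cap == name:
            return True                   # boolean capability (old servers)
        if cap.startswith(name + '='):
            return cap.split('=', 1)[1]   # valued capability
    return False

caps = set('lookup changegroupsubset unbundle=HG10GZ,HG10BZ,HG10UN'.split())
assert capable(caps, 'lookup') is True
assert capable(caps, 'unbundle') == 'HG10GZ,HG10BZ,HG10UN'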
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1991 +1,1991 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.path = path
21 self.path = path
22 self.root = os.path.realpath(path)
22 self.root = os.path.realpath(path)
23 self.path = os.path.join(self.root, ".hg")
23 self.path = os.path.join(self.root, ".hg")
24 self.origroot = path
24 self.origroot = path
25 self.opener = util.opener(self.path)
25 self.opener = util.opener(self.path)
26 self.wopener = util.opener(self.root)
26 self.wopener = util.opener(self.root)
27
27
28 if not os.path.isdir(self.path):
28 if not os.path.isdir(self.path):
29 if create:
29 if create:
30 if not os.path.exists(path):
30 if not os.path.exists(path):
31 os.mkdir(path)
31 os.mkdir(path)
32 os.mkdir(self.path)
32 os.mkdir(self.path)
33 requirements = ["revlogv1"]
33 requirements = ["revlogv1"]
34 if parentui.configbool('format', 'usestore', True):
34 if parentui.configbool('format', 'usestore', True):
35 os.mkdir(os.path.join(self.path, "store"))
35 os.mkdir(os.path.join(self.path, "store"))
36 requirements.append("store")
36 requirements.append("store")
37 # create an invalid changelog
37 # create an invalid changelog
38 self.opener("00changelog.i", "a").write(
38 self.opener("00changelog.i", "a").write(
39 '\0\0\0\2' # represents revlogv2
39 '\0\0\0\2' # represents revlogv2
40 ' dummy changelog to prevent using the old repo layout'
40 ' dummy changelog to prevent using the old repo layout'
41 )
41 )
42 reqfile = self.opener("requires", "w")
42 reqfile = self.opener("requires", "w")
43 for r in requirements:
43 for r in requirements:
44 reqfile.write("%s\n" % r)
44 reqfile.write("%s\n" % r)
45 reqfile.close()
45 reqfile.close()
46 else:
46 else:
47 raise repo.RepoError(_("repository %s not found") % path)
47 raise repo.RepoError(_("repository %s not found") % path)
48 elif create:
48 elif create:
49 raise repo.RepoError(_("repository %s already exists") % path)
49 raise repo.RepoError(_("repository %s already exists") % path)
50 else:
50 else:
51 # find requirements
51 # find requirements
52 try:
52 try:
53 requirements = self.opener("requires").read().splitlines()
53 requirements = self.opener("requires").read().splitlines()
54 except IOError, inst:
54 except IOError, inst:
55 if inst.errno != errno.ENOENT:
55 if inst.errno != errno.ENOENT:
56 raise
56 raise
57 requirements = []
57 requirements = []
58 # check them
58 # check them
59 for r in requirements:
59 for r in requirements:
60 if r not in self.supported:
60 if r not in self.supported:
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62
62
63 # setup store
63 # setup store
64 if "store" in requirements:
64 if "store" in requirements:
65 self.encodefn = util.encodefilename
65 self.encodefn = util.encodefilename
66 self.decodefn = util.decodefilename
66 self.decodefn = util.decodefilename
67 self.spath = os.path.join(self.path, "store")
67 self.spath = os.path.join(self.path, "store")
68 else:
68 else:
69 self.encodefn = lambda x: x
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
71 self.spath = self.path
72 self.sopener = util.encodedopener(util.opener(self.spath),
72 self.sopener = util.encodedopener(util.opener(self.spath),
73 self.encodefn)
73 self.encodefn)
74
74
75 self.ui = ui.ui(parentui=parentui)
75 self.ui = ui.ui(parentui=parentui)
76 try:
76 try:
77 self.ui.readconfig(self.join("hgrc"), self.root)
77 self.ui.readconfig(self.join("hgrc"), self.root)
78 extensions.loadall(self.ui)
78 extensions.loadall(self.ui)
79 except IOError:
79 except IOError:
80 pass
80 pass
81
81
82 self.tagscache = None
82 self.tagscache = None
83 self.branchcache = None
83 self.branchcache = None
84 self.nodetagscache = None
84 self.nodetagscache = None
85 self.filterpats = {}
85 self.filterpats = {}
86 self._transref = self._lockref = self._wlockref = None
86 self._transref = self._lockref = self._wlockref = None
87
87
88 def __getattr__(self, name):
88 def __getattr__(self, name):
89 if name == 'changelog':
89 if name == 'changelog':
90 self.changelog = changelog.changelog(self.sopener)
90 self.changelog = changelog.changelog(self.sopener)
91 self.sopener.defversion = self.changelog.version
91 self.sopener.defversion = self.changelog.version
92 return self.changelog
92 return self.changelog
93 if name == 'manifest':
93 if name == 'manifest':
94 self.changelog
94 self.changelog
95 self.manifest = manifest.manifest(self.sopener)
95 self.manifest = manifest.manifest(self.sopener)
96 return self.manifest
96 return self.manifest
97 if name == 'dirstate':
97 if name == 'dirstate':
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 return self.dirstate
99 return self.dirstate
100 else:
100 else:
101 raise AttributeError, name
101 raise AttributeError, name
102
102
103 def url(self):
103 def url(self):
104 return 'file:' + self.root
104 return 'file:' + self.root
105
105
106 def hook(self, name, throw=False, **args):
106 def hook(self, name, throw=False, **args):
107 return hook.hook(self.ui, self, name, throw, **args)
107 return hook.hook(self.ui, self, name, throw, **args)
108
108
109 tag_disallowed = ':\r\n'
109 tag_disallowed = ':\r\n'
110
110
111 def _tag(self, name, node, message, local, user, date, parent=None,
111 def _tag(self, name, node, message, local, user, date, parent=None,
112 extra={}):
112 extra={}):
113 use_dirstate = parent is None
113 use_dirstate = parent is None
114
114
115 for c in self.tag_disallowed:
115 for c in self.tag_disallowed:
116 if c in name:
116 if c in name:
117 raise util.Abort(_('%r cannot be used in a tag name') % c)
117 raise util.Abort(_('%r cannot be used in a tag name') % c)
118
118
119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120
120
121 def writetag(fp, name, munge, prevtags):
121 def writetag(fp, name, munge, prevtags):
122 if prevtags and prevtags[-1] != '\n':
122 if prevtags and prevtags[-1] != '\n':
123 fp.write('\n')
123 fp.write('\n')
124 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
124 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
125 fp.close()
125 fp.close()
126 self.hook('tag', node=hex(node), tag=name, local=local)
126 self.hook('tag', node=hex(node), tag=name, local=local)
127
127
128 prevtags = ''
128 prevtags = ''
129 if local:
129 if local:
130 try:
130 try:
131 fp = self.opener('localtags', 'r+')
131 fp = self.opener('localtags', 'r+')
132 except IOError, err:
132 except IOError, err:
133 fp = self.opener('localtags', 'a')
133 fp = self.opener('localtags', 'a')
134 else:
134 else:
135 prevtags = fp.read()
135 prevtags = fp.read()
136
136
137 # local tags are stored in the current charset
137 # local tags are stored in the current charset
138 writetag(fp, name, None, prevtags)
138 writetag(fp, name, None, prevtags)
139 return
139 return
140
140
141 if use_dirstate:
141 if use_dirstate:
142 try:
142 try:
143 fp = self.wfile('.hgtags', 'rb+')
143 fp = self.wfile('.hgtags', 'rb+')
144 except IOError, err:
144 except IOError, err:
145 fp = self.wfile('.hgtags', 'ab')
145 fp = self.wfile('.hgtags', 'ab')
146 else:
146 else:
147 prevtags = fp.read()
147 prevtags = fp.read()
148 else:
148 else:
149 try:
149 try:
150 prevtags = self.filectx('.hgtags', parent).data()
150 prevtags = self.filectx('.hgtags', parent).data()
151 except revlog.LookupError:
151 except revlog.LookupError:
152 pass
152 pass
153 fp = self.wfile('.hgtags', 'wb')
153 fp = self.wfile('.hgtags', 'wb')
154 if prevtags:
154 if prevtags:
155 fp.write(prevtags)
155 fp.write(prevtags)
156
156
157 # committed tags are stored in UTF-8
157 # committed tags are stored in UTF-8
158 writetag(fp, name, util.fromlocal, prevtags)
158 writetag(fp, name, util.fromlocal, prevtags)
159
159
160 if use_dirstate and '.hgtags' not in self.dirstate:
160 if use_dirstate and '.hgtags' not in self.dirstate:
161 self.add(['.hgtags'])
161 self.add(['.hgtags'])
162
162
163 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
163 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
164 extra=extra)
164 extra=extra)
165
165
166 self.hook('tag', node=hex(node), tag=name, local=local)
166 self.hook('tag', node=hex(node), tag=name, local=local)
167
167
168 return tagnode
168 return tagnode
169
169
170 def tag(self, name, node, message, local, user, date):
170 def tag(self, name, node, message, local, user, date):
171 '''tag a revision with a symbolic name.
171 '''tag a revision with a symbolic name.
172
172
173 if local is True, the tag is stored in a per-repository file.
173 if local is True, the tag is stored in a per-repository file.
174 otherwise, it is stored in the .hgtags file, and a new
174 otherwise, it is stored in the .hgtags file, and a new
175 changeset is committed with the change.
175 changeset is committed with the change.
176
176
177 keyword arguments:
177 keyword arguments:
178
178
179 local: whether to store tag in non-version-controlled file
179 local: whether to store tag in non-version-controlled file
180 (default False)
180 (default False)
181
181
182 message: commit message to use if committing
182 message: commit message to use if committing
183
183
184 user: name of user to use if committing
184 user: name of user to use if committing
185
185
186 date: date tuple to use if committing'''
186 date: date tuple to use if committing'''
187
187
188 for x in self.status()[:5]:
188 for x in self.status()[:5]:
189 if '.hgtags' in x:
189 if '.hgtags' in x:
190 raise util.Abort(_('working copy of .hgtags is changed '
190 raise util.Abort(_('working copy of .hgtags is changed '
191 '(please commit .hgtags manually)'))
191 '(please commit .hgtags manually)'))
192
192
193
193
194 self._tag(name, node, message, local, user, date)
194 self._tag(name, node, message, local, user, date)
195
195
196 def tags(self):
196 def tags(self):
197 '''return a mapping of tag to node'''
197 '''return a mapping of tag to node'''
198 if self.tagscache:
198 if self.tagscache:
199 return self.tagscache
199 return self.tagscache
200
200
201 globaltags = {}
201 globaltags = {}
202
202
203 def readtags(lines, fn):
203 def readtags(lines, fn):
204 filetags = {}
204 filetags = {}
205 count = 0
205 count = 0
206
206
207 def warn(msg):
207 def warn(msg):
208 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
208 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
209
209
210 for l in lines:
210 for l in lines:
211 count += 1
211 count += 1
212 if not l:
212 if not l:
213 continue
213 continue
214 s = l.split(" ", 1)
214 s = l.split(" ", 1)
215 if len(s) != 2:
215 if len(s) != 2:
216 warn(_("cannot parse entry"))
216 warn(_("cannot parse entry"))
217 continue
217 continue
218 node, key = s
218 node, key = s
219 key = util.tolocal(key.strip()) # stored in UTF-8
219 key = util.tolocal(key.strip()) # stored in UTF-8
220 try:
220 try:
221 bin_n = bin(node)
221 bin_n = bin(node)
222 except TypeError:
222 except TypeError:
223 warn(_("node '%s' is not well formed") % node)
223 warn(_("node '%s' is not well formed") % node)
224 continue
224 continue
225 if bin_n not in self.changelog.nodemap:
225 if bin_n not in self.changelog.nodemap:
226 warn(_("tag '%s' refers to unknown node") % key)
226 warn(_("tag '%s' refers to unknown node") % key)
227 continue
227 continue
228
228
229 h = []
229 h = []
230 if key in filetags:
230 if key in filetags:
231 n, h = filetags[key]
231 n, h = filetags[key]
232 h.append(n)
232 h.append(n)
233 filetags[key] = (bin_n, h)
233 filetags[key] = (bin_n, h)
234
234
235 for k, nh in filetags.items():
235 for k, nh in filetags.items():
236 if k not in globaltags:
236 if k not in globaltags:
237 globaltags[k] = nh
237 globaltags[k] = nh
238 continue
238 continue
239 # we prefer the global tag if:
239 # we prefer the global tag if:
240 # it supercedes us OR
240 # it supercedes us OR
241 # mutual supercedes and it has a higher rank
241 # mutual supercedes and it has a higher rank
242 # otherwise we win because we're tip-most
242 # otherwise we win because we're tip-most
243 an, ah = nh
243 an, ah = nh
244 bn, bh = globaltags[k]
244 bn, bh = globaltags[k]
245 if (bn != an and an in bh and
245 if (bn != an and an in bh and
246 (bn not in ah or len(bh) > len(ah))):
246 (bn not in ah or len(bh) > len(ah))):
247 an = bn
247 an = bn
248 ah.extend([n for n in bh if n not in ah])
248 ah.extend([n for n in bh if n not in ah])
249 globaltags[k] = an, ah
249 globaltags[k] = an, ah
250
250
251 # read the tags file from each head, ending with the tip
251 # read the tags file from each head, ending with the tip
252 f = None
252 f = None
253 for rev, node, fnode in self._hgtagsnodes():
253 for rev, node, fnode in self._hgtagsnodes():
254 f = (f and f.filectx(fnode) or
254 f = (f and f.filectx(fnode) or
255 self.filectx('.hgtags', fileid=fnode))
255 self.filectx('.hgtags', fileid=fnode))
256 readtags(f.data().splitlines(), f)
256 readtags(f.data().splitlines(), f)
257
257
258 try:
258 try:
259 data = util.fromlocal(self.opener("localtags").read())
259 data = util.fromlocal(self.opener("localtags").read())
260 # localtags are stored in the local character set
260 # localtags are stored in the local character set
261 # while the internal tag table is stored in UTF-8
261 # while the internal tag table is stored in UTF-8
262 readtags(data.splitlines(), "localtags")
262 readtags(data.splitlines(), "localtags")
263 except IOError:
263 except IOError:
264 pass
264 pass
265
265
266 self.tagscache = {}
266 self.tagscache = {}
267 for k,nh in globaltags.items():
267 for k,nh in globaltags.items():
268 n = nh[0]
268 n = nh[0]
269 if n != nullid:
269 if n != nullid:
270 self.tagscache[k] = n
270 self.tagscache[k] = n
271 self.tagscache['tip'] = self.changelog.tip()
271 self.tagscache['tip'] = self.changelog.tip()
272
272
273 return self.tagscache
273 return self.tagscache
274
274
275 def _hgtagsnodes(self):
275 def _hgtagsnodes(self):
276 heads = self.heads()
276 heads = self.heads()
277 heads.reverse()
277 heads.reverse()
278 last = {}
278 last = {}
279 ret = []
279 ret = []
280 for node in heads:
280 for node in heads:
281 c = self.changectx(node)
281 c = self.changectx(node)
282 rev = c.rev()
282 rev = c.rev()
283 try:
283 try:
284 fnode = c.filenode('.hgtags')
284 fnode = c.filenode('.hgtags')
285 except revlog.LookupError:
285 except revlog.LookupError:
286 continue
286 continue
287 ret.append((rev, node, fnode))
287 ret.append((rev, node, fnode))
288 if fnode in last:
288 if fnode in last:
289 ret[last[fnode]] = None
289 ret[last[fnode]] = None
290 last[fnode] = len(ret) - 1
290 last[fnode] = len(ret) - 1
291 return [item for item in ret if item]
291 return [item for item in ret if item]
292
292
293 def tagslist(self):
293 def tagslist(self):
294 '''return a list of tags ordered by revision'''
294 '''return a list of tags ordered by revision'''
295 l = []
295 l = []
296 for t, n in self.tags().items():
296 for t, n in self.tags().items():
297 try:
297 try:
298 r = self.changelog.rev(n)
298 r = self.changelog.rev(n)
299 except:
299 except:
300 r = -2 # sort to the beginning of the list if unknown
300 r = -2 # sort to the beginning of the list if unknown
301 l.append((r, t, n))
301 l.append((r, t, n))
302 l.sort()
302 l.sort()
303 return [(t, n) for r, t, n in l]
303 return [(t, n) for r, t, n in l]
304
304
305 def nodetags(self, node):
305 def nodetags(self, node):
306 '''return the tags associated with a node'''
306 '''return the tags associated with a node'''
307 if not self.nodetagscache:
307 if not self.nodetagscache:
308 self.nodetagscache = {}
308 self.nodetagscache = {}
309 for t, n in self.tags().items():
309 for t, n in self.tags().items():
310 self.nodetagscache.setdefault(n, []).append(t)
310 self.nodetagscache.setdefault(n, []).append(t)
311 return self.nodetagscache.get(node, [])
311 return self.nodetagscache.get(node, [])
312
312
313 def _branchtags(self):
313 def _branchtags(self):
314 partial, last, lrev = self._readbranchcache()
314 partial, last, lrev = self._readbranchcache()
315
315
316 tiprev = self.changelog.count() - 1
316 tiprev = self.changelog.count() - 1
317 if lrev != tiprev:
317 if lrev != tiprev:
318 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._updatebranchcache(partial, lrev+1, tiprev+1)
319 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319 self._writebranchcache(partial, self.changelog.tip(), tiprev)
320
320
321 return partial
321 return partial
322
322
323 def branchtags(self):
323 def branchtags(self):
324 if self.branchcache is not None:
324 if self.branchcache is not None:
325 return self.branchcache
325 return self.branchcache
326
326
327 self.branchcache = {} # avoid recursion in changectx
327 self.branchcache = {} # avoid recursion in changectx
328 partial = self._branchtags()
328 partial = self._branchtags()
329
329
330 # the branch cache is stored on disk as UTF-8, but in the local
330 # the branch cache is stored on disk as UTF-8, but in the local
331 # charset internally
331 # charset internally
332 for k, v in partial.items():
332 for k, v in partial.items():
333 self.branchcache[util.tolocal(k)] = v
333 self.branchcache[util.tolocal(k)] = v
334 return self.branchcache
334 return self.branchcache
335
335
336 def _readbranchcache(self):
336 def _readbranchcache(self):
337 partial = {}
337 partial = {}
338 try:
338 try:
339 f = self.opener("branch.cache")
339 f = self.opener("branch.cache")
340 lines = f.read().split('\n')
340 lines = f.read().split('\n')
341 f.close()
341 f.close()
342 except (IOError, OSError):
342 except (IOError, OSError):
343 return {}, nullid, nullrev
343 return {}, nullid, nullrev
344
344
345 try:
345 try:
346 last, lrev = lines.pop(0).split(" ", 1)
346 last, lrev = lines.pop(0).split(" ", 1)
347 last, lrev = bin(last), int(lrev)
347 last, lrev = bin(last), int(lrev)
348 if not (lrev < self.changelog.count() and
348 if not (lrev < self.changelog.count() and
349 self.changelog.node(lrev) == last): # sanity check
349 self.changelog.node(lrev) == last): # sanity check
350 # invalidate the cache
350 # invalidate the cache
351 raise ValueError('Invalid branch cache: unknown tip')
351 raise ValueError('Invalid branch cache: unknown tip')
352 for l in lines:
352 for l in lines:
353 if not l: continue
353 if not l: continue
354 node, label = l.split(" ", 1)
354 node, label = l.split(" ", 1)
355 partial[label.strip()] = bin(node)
355 partial[label.strip()] = bin(node)
356 except (KeyboardInterrupt, util.SignalInterrupt):
356 except (KeyboardInterrupt, util.SignalInterrupt):
357 raise
357 raise
358 except Exception, inst:
358 except Exception, inst:
359 if self.ui.debugflag:
359 if self.ui.debugflag:
360 self.ui.warn(str(inst), '\n')
360 self.ui.warn(str(inst), '\n')
361 partial, last, lrev = {}, nullid, nullrev
361 partial, last, lrev = {}, nullid, nullrev
362 return partial, last, lrev
362 return partial, last, lrev
363
363
364 def _writebranchcache(self, branches, tip, tiprev):
364 def _writebranchcache(self, branches, tip, tiprev):
365 try:
365 try:
366 f = self.opener("branch.cache", "w", atomictemp=True)
366 f = self.opener("branch.cache", "w", atomictemp=True)
367 f.write("%s %s\n" % (hex(tip), tiprev))
367 f.write("%s %s\n" % (hex(tip), tiprev))
368 for label, node in branches.iteritems():
368 for label, node in branches.iteritems():
369 f.write("%s %s\n" % (hex(node), label))
369 f.write("%s %s\n" % (hex(node), label))
370 f.rename()
370 f.rename()
371 except (IOError, OSError):
371 except (IOError, OSError):
372 pass
372 pass
373
373
374 def _updatebranchcache(self, partial, start, end):
374 def _updatebranchcache(self, partial, start, end):
375 for r in xrange(start, end):
375 for r in xrange(start, end):
376 c = self.changectx(r)
376 c = self.changectx(r)
377 b = c.branch()
377 b = c.branch()
378 partial[b] = c.node()
378 partial[b] = c.node()
379
379
380 def lookup(self, key):
380 def lookup(self, key):
381 if key == '.':
381 if key == '.':
382 key, second = self.dirstate.parents()
382 key, second = self.dirstate.parents()
383 if key == nullid:
383 if key == nullid:
384 raise repo.RepoError(_("no revision checked out"))
384 raise repo.RepoError(_("no revision checked out"))
385 if second != nullid:
385 if second != nullid:
386 self.ui.warn(_("warning: working directory has two parents, "
386 self.ui.warn(_("warning: working directory has two parents, "
387 "tag '.' uses the first\n"))
387 "tag '.' uses the first\n"))
388 elif key == 'null':
388 elif key == 'null':
389 return nullid
389 return nullid
390 n = self.changelog._match(key)
390 n = self.changelog._match(key)
391 if n:
391 if n:
392 return n
392 return n
393 if key in self.tags():
393 if key in self.tags():
394 return self.tags()[key]
394 return self.tags()[key]
395 if key in self.branchtags():
395 if key in self.branchtags():
396 return self.branchtags()[key]
396 return self.branchtags()[key]
397 n = self.changelog._partialmatch(key)
397 n = self.changelog._partialmatch(key)
398 if n:
398 if n:
399 return n
399 return n
400 try:
400 try:
401 if len(key) == 20:
401 if len(key) == 20:
402 key = hex(key)
402 key = hex(key)
403 except:
403 except:
404 pass
404 pass
405 raise repo.RepoError(_("unknown revision '%s'") % key)
405 raise repo.RepoError(_("unknown revision '%s'") % key)
406
406
407 def dev(self):
407 def dev(self):
408 return os.lstat(self.path).st_dev
408 return os.lstat(self.path).st_dev
409
409
410 def local(self):
410 def local(self):
411 return True
411 return True
412
412
413 def join(self, f):
413 def join(self, f):
414 return os.path.join(self.path, f)
414 return os.path.join(self.path, f)
415
415
416 def sjoin(self, f):
416 def sjoin(self, f):
417 f = self.encodefn(f)
417 f = self.encodefn(f)
418 return os.path.join(self.spath, f)
418 return os.path.join(self.spath, f)
419
419
420 def wjoin(self, f):
420 def wjoin(self, f):
421 return os.path.join(self.root, f)
421 return os.path.join(self.root, f)
422
422
423 def file(self, f):
423 def file(self, f):
424 if f[0] == '/':
424 if f[0] == '/':
425 f = f[1:]
425 f = f[1:]
426 return filelog.filelog(self.sopener, f)
426 return filelog.filelog(self.sopener, f)
427
427
428 def changectx(self, changeid=None):
428 def changectx(self, changeid=None):
429 return context.changectx(self, changeid)
429 return context.changectx(self, changeid)
430
430
431 def workingctx(self):
431 def workingctx(self):
432 return context.workingctx(self)
432 return context.workingctx(self)
433
433
434 def parents(self, changeid=None):
434 def parents(self, changeid=None):
435 '''
435 '''
436 get list of changectxs for parents of changeid or working directory
436 get list of changectxs for parents of changeid or working directory
437 '''
437 '''
438 if changeid is None:
438 if changeid is None:
439 pl = self.dirstate.parents()
439 pl = self.dirstate.parents()
440 else:
440 else:
441 n = self.changelog.lookup(changeid)
441 n = self.changelog.lookup(changeid)
442 pl = self.changelog.parents(n)
442 pl = self.changelog.parents(n)
443 if pl[1] == nullid:
443 if pl[1] == nullid:
444 return [self.changectx(pl[0])]
444 return [self.changectx(pl[0])]
445 return [self.changectx(pl[0]), self.changectx(pl[1])]
445 return [self.changectx(pl[0]), self.changectx(pl[1])]
446
446
447 def filectx(self, path, changeid=None, fileid=None):
447 def filectx(self, path, changeid=None, fileid=None):
448 """changeid can be a changeset revision, node, or tag.
448 """changeid can be a changeset revision, node, or tag.
449 fileid can be a file revision or node."""
449 fileid can be a file revision or node."""
450 return context.filectx(self, path, changeid, fileid)
450 return context.filectx(self, path, changeid, fileid)
451
451
452 def getcwd(self):
452 def getcwd(self):
453 return self.dirstate.getcwd()
453 return self.dirstate.getcwd()
454
454
455 def pathto(self, f, cwd=None):
455 def pathto(self, f, cwd=None):
456 return self.dirstate.pathto(f, cwd)
456 return self.dirstate.pathto(f, cwd)
457
457
458 def wfile(self, f, mode='r'):
458 def wfile(self, f, mode='r'):
459 return self.wopener(f, mode)
459 return self.wopener(f, mode)
460
460
461 def _link(self, f):
461 def _link(self, f):
462 return os.path.islink(self.wjoin(f))
462 return os.path.islink(self.wjoin(f))
463
463
464 def _filter(self, filter, filename, data):
464 def _filter(self, filter, filename, data):
465 if filter not in self.filterpats:
465 if filter not in self.filterpats:
466 l = []
466 l = []
467 for pat, cmd in self.ui.configitems(filter):
467 for pat, cmd in self.ui.configitems(filter):
468 mf = util.matcher(self.root, "", [pat], [], [])[1]
468 mf = util.matcher(self.root, "", [pat], [], [])[1]
469 l.append((mf, cmd))
469 l.append((mf, cmd))
470 self.filterpats[filter] = l
470 self.filterpats[filter] = l
471
471
472 for mf, cmd in self.filterpats[filter]:
472 for mf, cmd in self.filterpats[filter]:
473 if mf(filename):
473 if mf(filename):
474 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
474 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
475 data = util.filter(data, cmd)
475 data = util.filter(data, cmd)
476 break
476 break
477
477
478 return data
478 return data
479
479
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        if "l" in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr

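    # Illustrative note (not part of the original module): the rename list
    # above is what makes rollback() possible.  While a transaction runs,
    # pending changes are journalled in the store's "journal" file (plus a
    # dirstate snapshot in "journal.dirstate"); when the transaction closes,
    # aftertrans() renames them to "undo" / "undo.dirstate", which
    # rollback() later replays.
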
    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if hasattr(self, a):
                delattr(self, a)
        self.tagscache = None
        self.nodetagscache = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

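    # Illustrative note (not part of the original module): the lock timeout
    # consulted in _lock above comes from the [ui] section of hgrc, e.g.
    #
    #   [ui]
    #   timeout = 600    # seconds to wait for a held lock before aborting
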
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

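    # Illustrative note (not part of the original module): for a commit that
    # follows "hg copy foo bar" on a single parent, the "not a branch merge"
    # path above would produce filelog metadata along the lines of
    #
    #   meta = {"copy": "foo", "copyrev": "<hex of foo's filenode in p1>"}
    #
    # and the new bar revision is stored with fp1 == nullid, so readers know
    # to consult the copy data instead of a regular first parent.
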
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
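                        # dirstate states (illustrative gloss, not in the
                        # original source): n=normal, m=needs merging,
                        # a=added; 'r' (removed) is handled below.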
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch %s" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    return None
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in new:
                        self.dirstate.normal(f)
                    for f in removed:
                        self.dirstate.forget(f)

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr, lock, wlock

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

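    # Illustrative usage (not part of the original module): callers consume
    # walk() as a generator and dispatch on the source tag, e.g.
    #
    #   for src, fn in repo.walk(files=['.']):
    #       if src == 'b':        # matched badmatch only
    #           continue
    #       process(fn)           # hypothetical per-file handler
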
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

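    # Illustrative usage (not part of the original module): status() always
    # returns seven lists, so callers unpack positionally, e.g.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(list_ignored=True, list_clean=True)
    #
    # (ignored and clean stay empty unless the corresponding list_* flag is
    # passed.)
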
    def add(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            p = self.dirstate.parents()[0]
            mn = self.changelog.read(p)[0]
            m = self.manifest.read(mn)
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

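    # Illustrative walk-through (not part of the original module): for a
    # linear branch 0 <- 1 <- 2 with tip 2, the scan starts with heads=[2]
    # and ancestors={1, nullrev}.  Visiting rev 1 finds it in ancestors, so
    # it is swapped for its parent 0; visiting rev 0 likewise drops out,
    # leaving heads=[2].  A rev on the branch that is never seen in
    # ancestors (an unmerged second head) would instead be appended to
    # heads.
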
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

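    # Illustrative note (not part of the original module): between() samples
    # nodes at exponentially growing distances from top, so for a chain
    # top = n0 <- n1 <- n2 <- ... it collects the nodes 1, 2, 4, 8, ... steps
    # down (appending when i == f, then doubling f).  findincoming() relies
    # on this spacing to binary-search a branch range in O(log n) rounds.
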
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

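    # Illustrative summary (not part of the original module): discovery above
    # runs in three phases -- compare heads (known remote heads go straight
    # into base), walk unknown linear branch segments via remote.branches()
    # in batches of ten, then narrow any segment whose bottom is known with
    # remote.between() until the earliest unknown changesets (the roots to
    # fetch) are pinned down.
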
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

1388
1388
1389 def prepush(self, remote, force, revs):
1389 def prepush(self, remote, force, revs):
1390 base = {}
1390 base = {}
1391 remote_heads = remote.heads()
1391 remote_heads = remote.heads()
1392 inc = self.findincoming(remote, base, remote_heads, force=force)
1392 inc = self.findincoming(remote, base, remote_heads, force=force)
1393
1393
1394 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1394 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1395 if revs is not None:
1395 if revs is not None:
1396 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1396 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1397 else:
1397 else:
1398 bases, heads = update, self.changelog.heads()
1398 bases, heads = update, self.changelog.heads()
1399
1399
1400 if not bases:
1400 if not bases:
1401 self.ui.status(_("no changes found\n"))
1401 self.ui.status(_("no changes found\n"))
1402 return None, 1
1402 return None, 1
1403 elif not force:
1403 elif not force:
1404 # check if we're creating new remote heads
1404 # check if we're creating new remote heads
1405 # to be a remote head after push, node must be either
1405 # to be a remote head after push, node must be either
1406 # - unknown locally
1406 # - unknown locally
1407 # - a local outgoing head descended from update
1407 # - a local outgoing head descended from update
1408 # - a remote head that's known locally and not
1408 # - a remote head that's known locally and not
1409 # ancestral to an outgoing head
1409 # ancestral to an outgoing head
1410
1410
1411 warn = 0
1411 warn = 0
1412
1412
1413 if remote_heads == [nullid]:
1413 if remote_heads == [nullid]:
1414 warn = 0
1414 warn = 0
1415 elif not revs and len(heads) > len(remote_heads):
1415 elif not revs and len(heads) > len(remote_heads):
1416 warn = 1
1416 warn = 1
1417 else:
1417 else:
1418 newheads = list(heads)
1418 newheads = list(heads)
1419 for r in remote_heads:
1419 for r in remote_heads:
1420 if r in self.changelog.nodemap:
1420 if r in self.changelog.nodemap:
1421 desc = self.changelog.heads(r, heads)
1421 desc = self.changelog.heads(r, heads)
1422 l = [h for h in heads if h in desc]
1422 l = [h for h in heads if h in desc]
1423 if not l:
1423 if not l:
1424 newheads.append(r)
1424 newheads.append(r)
1425 else:
1425 else:
1426 newheads.append(r)
1426 newheads.append(r)
1427 if len(newheads) > len(remote_heads):
1427 if len(newheads) > len(remote_heads):
1428 warn = 1
1428 warn = 1
1429
1429
1430 if warn:
1430 if warn:
1431 self.ui.warn(_("abort: push creates new remote branches!\n"))
1431 self.ui.warn(_("abort: push creates new remote branches!\n"))
1432 self.ui.status(_("(did you forget to merge?"
1432 self.ui.status(_("(did you forget to merge?"
1433 " use push -f to force)\n"))
1433 " use push -f to force)\n"))
1434 return None, 1
1434 return None, 1
1435 elif inc:
1435 elif inc:
1436 self.ui.warn(_("note: unsynced remote changes!\n"))
1436 self.ui.warn(_("note: unsynced remote changes!\n"))
1437
1437
1438
1438
1439 if revs is None:
1439 if revs is None:
1440 cg = self.changegroup(update, 'push')
1440 cg = self.changegroup(update, 'push')
1441 else:
1441 else:
1442 cg = self.changegroupsubset(update, revs, 'push')
1442 cg = self.changegroupsubset(update, revs, 'push')
1443 return cg, remote_heads
1443 return cg, remote_heads
1444
1444
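    # Illustrative scenario (not part of the original module): if the remote
    # has one head and the local repo has committed a second, unmerged head
    # on top of it, len(heads) > len(remote_heads) holds, warn is set, and
    # the push aborts with "push creates new remote branches!" unless
    # force=True (hg push -f) is given.
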
1445 def push_addchangegroup(self, remote, force, revs):
1445 def push_addchangegroup(self, remote, force, revs):
1446 lock = remote.lock()
1446 lock = remote.lock()
1447 try:
1447 try:
1448 ret = self.prepush(remote, force, revs)
1448 ret = self.prepush(remote, force, revs)
1449 if ret[0] is not None:
1449 if ret[0] is not None:
1450 cg, remote_heads = ret
1450 cg, remote_heads = ret
1451 return remote.addchangegroup(cg, 'push', self.url())
1451 return remote.addchangegroup(cg, 'push', self.url())
1452 return ret[1]
1452 return ret[1]
1453 finally:
1453 finally:
1454 del lock
1454 del lock
1455
1455
1456 def push_unbundle(self, remote, force, revs):
1456 def push_unbundle(self, remote, force, revs):
1457 # local repo finds heads on server, finds out what revs it
1457 # local repo finds heads on server, finds out what revs it
1458 # must push. once revs are transferred, if the server finds it has
1458 # must push. once revs are transferred, if the server finds it has
1459 # different heads (someone else won the commit/push race), the server
1459 # different heads (someone else won the commit/push race), the server
1460 # aborts.
1460 # aborts.
1461
1461
1462 ret = self.prepush(remote, force, revs)
1462 ret = self.prepush(remote, force, revs)
1463 if ret[0] is not None:
1463 if ret[0] is not None:
1464 cg, remote_heads = ret
1464 cg, remote_heads = ret
1465 if force: remote_heads = ['force']
1465 if force: remote_heads = ['force']
1466 return remote.unbundle(cg, remote_heads, 'push')
1466 return remote.unbundle(cg, remote_heads, 'push')
1467 return ret[1]
1467 return ret[1]
1468
1468
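The race protection described in the comment above lives on the server side of unbundle: the client sends along the remote heads it computed its outgoing set against, and the server refuses the bundle if its heads have moved since. A sketch of that comparison under those assumptions (check_heads is illustrative, not the actual server entry point):

    def check_heads(current_heads, client_heads):
        # 'force' mirrors remote_heads = ['force'] above: skip the check
        if client_heads == ['force']:
            return True
        # any difference means someone else pushed in the meantime
        return sorted(current_heads) == sorted(client_heads)

    assert check_heads(['h1', 'h2'], ['h2', 'h1'])
    assert not check_heads(['h1', 'h3'], ['h1', 'h2'])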
1469 def changegroupinfo(self, nodes):
1469 def changegroupinfo(self, nodes):
1470 self.ui.note(_("%d changesets found\n") % len(nodes))
1470 self.ui.note(_("%d changesets found\n") % len(nodes))
1471 if self.ui.debugflag:
1471 if self.ui.debugflag:
1472 self.ui.debug(_("List of changesets:\n"))
1472 self.ui.debug(_("List of changesets:\n"))
1473 for node in nodes:
1473 for node in nodes:
1474 self.ui.debug("%s\n" % hex(node))
1474 self.ui.debug("%s\n" % hex(node))
1475
1475
1476 def changegroupsubset(self, bases, heads, source):
1476 def changegroupsubset(self, bases, heads, source):
1477 """This function generates a changegroup consisting of all the nodes
1477 """This function generates a changegroup consisting of all the nodes
1478 that are descendants of any of the bases, and ancestors of any of
1478 that are descendants of any of the bases, and ancestors of any of
1479 the heads.
1479 the heads.
1480
1480
1481 It is fairly complex as determining which filenodes and which
1481 It is fairly complex as determining which filenodes and which
1482 manifest nodes need to be included for the changeset to be complete
1482 manifest nodes need to be included for the changeset to be complete
1483 is non-trivial.
1483 is non-trivial.
1484
1484
1485 Another wrinkle is doing the reverse, figuring out which changeset in
1485 Another wrinkle is doing the reverse, figuring out which changeset in
1486 the changegroup a particular filenode or manifestnode belongs to."""
1486 the changegroup a particular filenode or manifestnode belongs to."""
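For intuition about the bases/heads contract: nodesbetween (used just below) selects the nodes that are descendants of a base and ancestors of a head, with the bases themselves included in the result. On a linear history the selection degenerates to a slice, which a toy model can show (real DAGs need the graph walk nodesbetween performs; this stand-in is illustrative only):

    def between_linear(chain, base, head):
        # toy nodesbetween for a straight-line history: base through head
        return chain[chain.index(base):chain.index(head) + 1]

    # history n0 -> n1 -> n2 -> n3, recipient already has n0
    assert between_linear(['n0', 'n1', 'n2', 'n3'], 'n1', 'n3') == ['n1', 'n2', 'n3']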
1487
1487
1488 self.hook('preoutgoing', throw=True, source=source)
1488 self.hook('preoutgoing', throw=True, source=source)
1489
1489
1490 # Set up some initial variables
1490 # Set up some initial variables
1491 # Make it easy to refer to self.changelog
1491 # Make it easy to refer to self.changelog
1492 cl = self.changelog
1492 cl = self.changelog
1493 # msng is short for missing - compute the list of changesets in this
1493 # msng is short for missing - compute the list of changesets in this
1494 # changegroup.
1494 # changegroup.
1495 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1495 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1496 self.changegroupinfo(msng_cl_lst)
1496 self.changegroupinfo(msng_cl_lst)
1497 # Some bases may turn out to be superfluous, and some heads may be
1497 # Some bases may turn out to be superfluous, and some heads may be
1498 # too. nodesbetween will return the minimal set of bases and heads
1498 # too. nodesbetween will return the minimal set of bases and heads
1499 # necessary to re-create the changegroup.
1499 # necessary to re-create the changegroup.
1500
1500
1501 # Known heads are the list of heads that it is assumed the recipient
1501 # Known heads are the list of heads that it is assumed the recipient
1502 # of this changegroup will know about.
1502 # of this changegroup will know about.
1503 knownheads = {}
1503 knownheads = {}
1504 # We assume that all parents of bases are known heads.
1504 # We assume that all parents of bases are known heads.
1505 for n in bases:
1505 for n in bases:
1506 for p in cl.parents(n):
1506 for p in cl.parents(n):
1507 if p != nullid:
1507 if p != nullid:
1508 knownheads[p] = 1
1508 knownheads[p] = 1
1509 knownheads = knownheads.keys()
1509 knownheads = knownheads.keys()
1510 if knownheads:
1510 if knownheads:
1511 # Now that we know what heads are known, we can compute which
1511 # Now that we know what heads are known, we can compute which
1512 # changesets are known. The recipient must know about all
1512 # changesets are known. The recipient must know about all
1513 # changesets required to reach the known heads from the null
1513 # changesets required to reach the known heads from the null
1514 # changeset.
1514 # changeset.
1515 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1515 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1516 junk = None
1516 junk = None
1517 # Transform the list into an ersatz set.
1517 # Transform the list into an ersatz set.
1518 has_cl_set = dict.fromkeys(has_cl_set)
1518 has_cl_set = dict.fromkeys(has_cl_set)
1519 else:
1519 else:
1520 # If there were no known heads, the recipient cannot be assumed to
1520 # If there were no known heads, the recipient cannot be assumed to
1521 # know about any changesets.
1521 # know about any changesets.
1522 has_cl_set = {}
1522 has_cl_set = {}
1523
1523
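The knownheads/has_cl_set bookkeeping above uses dicts as sets, the idiom this code base relied on before a real set type was available everywhere (the same gap util.set fills elsewhere in this changeset). The two spellings side by side:

    # ersatz set: a dict whose values are ignored
    has = dict.fromkeys(['n1', 'n2'])
    has['n3'] = 1                      # add
    assert 'n2' in has                 # membership
    # the same operations with a real set
    s = set(['n1', 'n2'])
    s.add('n3')
    assert 'n2' in s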
1524 # Make it easy to refer to self.manifest
1524 # Make it easy to refer to self.manifest
1525 mnfst = self.manifest
1525 mnfst = self.manifest
1526 # We don't know which manifests are missing yet
1526 # We don't know which manifests are missing yet
1527 msng_mnfst_set = {}
1527 msng_mnfst_set = {}
1528 # Nor do we know which filenodes are missing.
1528 # Nor do we know which filenodes are missing.
1529 msng_filenode_set = {}
1529 msng_filenode_set = {}
1530
1530
1531 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1531 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1532 junk = None
1532 junk = None
1533
1533
1534 # A changeset always belongs to itself, so the changenode lookup
1534 # A changeset always belongs to itself, so the changenode lookup
1535 # function for a changenode is identity.
1535 # function for a changenode is identity.
1536 def identity(x):
1536 def identity(x):
1537 return x
1537 return x
1538
1538
1539 # A function generating function. Sets up an environment for the
1539 # A function generating function. Sets up an environment for the
1540 # inner function.
1540 # inner function.
1541 def cmp_by_rev_func(revlog):
1541 def cmp_by_rev_func(revlog):
1542 # Compare two nodes by their revision number in the environment's
1542 # Compare two nodes by their revision number in the environment's
1543 # revision history. Since the revision number both represents the
1543 # revision history. Since the revision number both represents the
1544 # most efficient order to read the nodes in, and represents a
1544 # most efficient order to read the nodes in, and represents a
1545 # topological sorting of the nodes, this function is often useful.
1545 # topological sorting of the nodes, this function is often useful.
1546 def cmp_by_rev(a, b):
1546 def cmp_by_rev(a, b):
1547 return cmp(revlog.rev(a), revlog.rev(b))
1547 return cmp(revlog.rev(a), revlog.rev(b))
1548 return cmp_by_rev
1548 return cmp_by_rev
1549
1549
1550 # If we determine that a particular file or manifest node must be a
1550 # If we determine that a particular file or manifest node must be a
1551 # node that the recipient of the changegroup will already have, we can
1551 # node that the recipient of the changegroup will already have, we can
1552 # also assume the recipient will have all the parents. This function
1552 # also assume the recipient will have all the parents. This function
1553 # prunes them from the set of missing nodes.
1553 # prunes them from the set of missing nodes.
1554 def prune_parents(revlog, hasset, msngset):
1554 def prune_parents(revlog, hasset, msngset):
1555 haslst = hasset.keys()
1555 haslst = hasset.keys()
1556 haslst.sort(cmp_by_rev_func(revlog))
1556 haslst.sort(cmp_by_rev_func(revlog))
1557 for node in haslst:
1557 for node in haslst:
1558 parentlst = [p for p in revlog.parents(node) if p != nullid]
1558 parentlst = [p for p in revlog.parents(node) if p != nullid]
1559 while parentlst:
1559 while parentlst:
1560 n = parentlst.pop()
1560 n = parentlst.pop()
1561 if n not in hasset:
1561 if n not in hasset:
1562 hasset[n] = 1
1562 hasset[n] = 1
1563 p = [p for p in revlog.parents(n) if p != nullid]
1563 p = [p for p in revlog.parents(n) if p != nullid]
1564 parentlst.extend(p)
1564 parentlst.extend(p)
1565 for n in hasset:
1565 for n in hasset:
1566 msngset.pop(n, None)
1566 msngset.pop(n, None)
1567
1567
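prune_parents leans on one invariant: if the recipient has a node, it has every ancestor of that node, so the whole ancestry of the has-set can be dropped from the missing set. A worked miniature with a hypothetical parent table (the revision-order sort in the real function is an efficiency detail omitted here):

    parents = {'c': ['b'], 'b': ['a'], 'a': []}

    def prune(hasset, msngset):
        # pull every ancestor of a known-present node into hasset,
        # then delete all of hasset from the missing set
        stack = list(hasset)
        while stack:
            n = stack.pop()
            for p in parents[n]:
                if p not in hasset:
                    hasset[p] = 1
                    stack.append(p)
        for n in hasset:
            msngset.pop(n, None)

    msng = {'a': 1, 'b': 1, 'c': 1}
    prune({'b': 1}, msng)         # recipient is known to have b
    assert msng == {'c': 1}       # only c still needs sending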
1568 # This is a function generating function used to set up an environment
1568 # This is a function generating function used to set up an environment
1569 # for the inner function to execute in.
1569 # for the inner function to execute in.
1570 def manifest_and_file_collector(changedfileset):
1570 def manifest_and_file_collector(changedfileset):
1571 # This is an information gathering function that gathers
1571 # This is an information gathering function that gathers
1572 # information from each changeset node that goes out as part of
1572 # information from each changeset node that goes out as part of
1573 # the changegroup. The information gathered is a list of which
1573 # the changegroup. The information gathered is a list of which
1574 # manifest nodes are potentially required (the recipient may
1574 # manifest nodes are potentially required (the recipient may
1575 # already have them) and a list of all files which were
1575 # already have them) and a list of all files which were
1576 # changed in any changeset in the changegroup.
1576 # changed in any changeset in the changegroup.
1577 #
1577 #
1578 # We also remember the first changenode we saw any manifest
1578 # We also remember the first changenode we saw any manifest
1579 # referenced by so we can later determine which changenode 'owns'
1579 # referenced by so we can later determine which changenode 'owns'
1580 # the manifest.
1580 # the manifest.
1581 def collect_manifests_and_files(clnode):
1581 def collect_manifests_and_files(clnode):
1582 c = cl.read(clnode)
1582 c = cl.read(clnode)
1583 for f in c[3]:
1583 for f in c[3]:
1584 # This is to make sure we only have one instance of each
1584 # This is to make sure we only have one instance of each
1585 # filename string.
1585 # filename string.
1586 changedfileset.setdefault(f, f)
1586 changedfileset.setdefault(f, f)
1587 msng_mnfst_set.setdefault(c[0], clnode)
1587 msng_mnfst_set.setdefault(c[0], clnode)
1588 return collect_manifests_and_files
1588 return collect_manifests_and_files
1589
1589
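The "function generating function" pattern used here and below is a plain closure factory: the outer call binds shared state (changedfileset above) and hands the revlog group machinery a one-argument callback. In miniature:

    def make_collector(seen):
        def collect(item):
            seen[item] = 1        # closes over the caller's dict
        return collect

    seen = {}
    cb = make_collector(seen)
    for x in ['a', 'b', 'a']:     # the group generator plays this role
        cb(x)
    assert sorted(seen) == ['a', 'b']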
1590 # Figure out which manifest nodes (of the ones we think might be part
1590 # Figure out which manifest nodes (of the ones we think might be part
1591 # of the changegroup) the recipient must know about and remove them
1591 # of the changegroup) the recipient must know about and remove them
1592 # from the changegroup.
1592 # from the changegroup.
1593 def prune_manifests():
1593 def prune_manifests():
1594 has_mnfst_set = {}
1594 has_mnfst_set = {}
1595 for n in msng_mnfst_set:
1595 for n in msng_mnfst_set:
1596 # If a 'missing' manifest thinks it belongs to a changenode
1596 # If a 'missing' manifest thinks it belongs to a changenode
1597 # the recipient is assumed to have, obviously the recipient
1597 # the recipient is assumed to have, obviously the recipient
1598 # must have that manifest.
1598 # must have that manifest.
1599 linknode = cl.node(mnfst.linkrev(n))
1599 linknode = cl.node(mnfst.linkrev(n))
1600 if linknode in has_cl_set:
1600 if linknode in has_cl_set:
1601 has_mnfst_set[n] = 1
1601 has_mnfst_set[n] = 1
1602 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1602 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1603
1603
1604 # Use the information collected in collect_manifests_and_files to say
1604 # Use the information collected in collect_manifests_and_files to say
1605 # which changenode any manifestnode belongs to.
1605 # which changenode any manifestnode belongs to.
1606 def lookup_manifest_link(mnfstnode):
1606 def lookup_manifest_link(mnfstnode):
1607 return msng_mnfst_set[mnfstnode]
1607 return msng_mnfst_set[mnfstnode]
1608
1608
1609 # A function generating function that sets up the initial environment
1609 # A function generating function that sets up the initial environment
1610 # for the inner function.
1610 # for the inner function.
1611 def filenode_collector(changedfiles):
1611 def filenode_collector(changedfiles):
1612 next_rev = [0]
1612 next_rev = [0]
1613 # This gathers information from each manifestnode included in the
1613 # This gathers information from each manifestnode included in the
1614 # changegroup about which filenodes the manifest node references
1614 # changegroup about which filenodes the manifest node references
1615 # so we can include those in the changegroup too.
1615 # so we can include those in the changegroup too.
1616 #
1616 #
1617 # It also remembers which changenode each filenode belongs to. It
1617 # It also remembers which changenode each filenode belongs to. It
1618 # does this by assuming that a filenode belongs to the changenode
1618 # does this by assuming that a filenode belongs to the changenode
1619 # that the first manifest referencing it belongs to.
1619 # that the first manifest referencing it belongs to.
1620 def collect_msng_filenodes(mnfstnode):
1620 def collect_msng_filenodes(mnfstnode):
1621 r = mnfst.rev(mnfstnode)
1621 r = mnfst.rev(mnfstnode)
1622 if r == next_rev[0]:
1622 if r == next_rev[0]:
1623 # If the last rev we looked at was the one just previous,
1623 # If the last rev we looked at was the one just previous,
1624 # we only need to see a diff.
1624 # we only need to see a diff.
1625 deltamf = mnfst.readdelta(mnfstnode)
1625 deltamf = mnfst.readdelta(mnfstnode)
1626 # For each line in the delta
1626 # For each line in the delta
1627 for f, fnode in deltamf.items():
1627 for f, fnode in deltamf.items():
1628 f = changedfiles.get(f, None)
1628 f = changedfiles.get(f, None)
1629 # And if the file is in the list of files we care
1629 # And if the file is in the list of files we care
1630 # about.
1630 # about.
1631 if f is not None:
1631 if f is not None:
1632 # Get the changenode this manifest belongs to
1632 # Get the changenode this manifest belongs to
1633 clnode = msng_mnfst_set[mnfstnode]
1633 clnode = msng_mnfst_set[mnfstnode]
1634 # Create the set of filenodes for the file if
1634 # Create the set of filenodes for the file if
1635 # there isn't one already.
1635 # there isn't one already.
1636 ndset = msng_filenode_set.setdefault(f, {})
1636 ndset = msng_filenode_set.setdefault(f, {})
1637 # And set the filenode's changelog node to the
1637 # And set the filenode's changelog node to the
1638 # manifest's if it hasn't been set already.
1638 # manifest's if it hasn't been set already.
1639 ndset.setdefault(fnode, clnode)
1639 ndset.setdefault(fnode, clnode)
1640 else:
1640 else:
1641 # Otherwise we need a full manifest.
1641 # Otherwise we need a full manifest.
1642 m = mnfst.read(mnfstnode)
1642 m = mnfst.read(mnfstnode)
1643 # For every file we care about.
1643 # For every file we care about.
1644 for f in changedfiles:
1644 for f in changedfiles:
1645 fnode = m.get(f, None)
1645 fnode = m.get(f, None)
1646 # If it's in the manifest
1646 # If it's in the manifest
1647 if fnode is not None:
1647 if fnode is not None:
1648 # See comments above.
1648 # See comments above.
1649 clnode = msng_mnfst_set[mnfstnode]
1649 clnode = msng_mnfst_set[mnfstnode]
1650 ndset = msng_filenode_set.setdefault(f, {})
1650 ndset = msng_filenode_set.setdefault(f, {})
1651 ndset.setdefault(fnode, clnode)
1651 ndset.setdefault(fnode, clnode)
1652 # Remember the revision we hope to see next.
1652 # Remember the revision we hope to see next.
1653 next_rev[0] = r + 1
1653 next_rev[0] = r + 1
1654 return collect_msng_filenodes
1654 return collect_msng_filenodes
1655
1655
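The next_rev bookkeeping above is a read-cost optimization: while manifests arrive in consecutive revision order, readdelta exposes only the entries that changed since the previous manifest, so only the delta needs scanning; any gap forces a full read. The decision in isolation (stub recording instead of real revlog reads):

    expected = [0]
    reads = []
    def visit(rev):
        if rev == expected[0]:
            reads.append(('delta', rev))   # consecutive: cheap delta
        else:
            reads.append(('full', rev))    # gap: full manifest read
        expected[0] = rev + 1              # the rev we hope to see next

    for r in [0, 1, 5, 6]:
        visit(r)
    assert reads == [('delta', 0), ('delta', 1), ('full', 5), ('delta', 6)]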
1656 # We have a list of filenodes we think we need for a file, let's remove
1656 # We have a list of filenodes we think we need for a file, let's remove
1657 # all those we know the recipient must have.
1657 # all those we know the recipient must have.
1658 def prune_filenodes(f, filerevlog):
1658 def prune_filenodes(f, filerevlog):
1659 msngset = msng_filenode_set[f]
1659 msngset = msng_filenode_set[f]
1660 hasset = {}
1660 hasset = {}
1661 # If a 'missing' filenode thinks it belongs to a changenode we
1661 # If a 'missing' filenode thinks it belongs to a changenode we
1662 # assume the recipient must have, then the recipient must have
1662 # assume the recipient must have, then the recipient must have
1663 # that filenode.
1663 # that filenode.
1664 for n in msngset:
1664 for n in msngset:
1665 clnode = cl.node(filerevlog.linkrev(n))
1665 clnode = cl.node(filerevlog.linkrev(n))
1666 if clnode in has_cl_set:
1666 if clnode in has_cl_set:
1667 hasset[n] = 1
1667 hasset[n] = 1
1668 prune_parents(filerevlog, hasset, msngset)
1668 prune_parents(filerevlog, hasset, msngset)
1669
1669
1670 # A function generating function that sets up a context for the
1670 # A function generating function that sets up a context for the
1671 # inner function.
1671 # inner function.
1672 def lookup_filenode_link_func(fname):
1672 def lookup_filenode_link_func(fname):
1673 msngset = msng_filenode_set[fname]
1673 msngset = msng_filenode_set[fname]
1674 # Lookup the changenode the filenode belongs to.
1674 # Lookup the changenode the filenode belongs to.
1675 def lookup_filenode_link(fnode):
1675 def lookup_filenode_link(fnode):
1676 return msngset[fnode]
1676 return msngset[fnode]
1677 return lookup_filenode_link
1677 return lookup_filenode_link
1678
1678
1679 # Now that we have all these utility functions to help out and
1679 # Now that we have all these utility functions to help out and
1680 # logically divide up the task, generate the group.
1680 # logically divide up the task, generate the group.
1681 def gengroup():
1681 def gengroup():
1682 # The set of changed files starts empty.
1682 # The set of changed files starts empty.
1683 changedfiles = {}
1683 changedfiles = {}
1684 # Create a changenode group generator that will call our functions
1684 # Create a changenode group generator that will call our functions
1685 # back to lookup the owning changenode and collect information.
1685 # back to lookup the owning changenode and collect information.
1686 group = cl.group(msng_cl_lst, identity,
1686 group = cl.group(msng_cl_lst, identity,
1687 manifest_and_file_collector(changedfiles))
1687 manifest_and_file_collector(changedfiles))
1688 for chnk in group:
1688 for chnk in group:
1689 yield chnk
1689 yield chnk
1690
1690
1691 # The list of manifests has been collected by the generator
1691 # The list of manifests has been collected by the generator
1692 # calling our functions back.
1692 # calling our functions back.
1693 prune_manifests()
1693 prune_manifests()
1694 msng_mnfst_lst = msng_mnfst_set.keys()
1694 msng_mnfst_lst = msng_mnfst_set.keys()
1695 # Sort the manifestnodes by revision number.
1695 # Sort the manifestnodes by revision number.
1696 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1696 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1697 # Create a generator for the manifestnodes that calls our lookup
1697 # Create a generator for the manifestnodes that calls our lookup
1698 # and data collection functions back.
1698 # and data collection functions back.
1699 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1699 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1700 filenode_collector(changedfiles))
1700 filenode_collector(changedfiles))
1701 for chnk in group:
1701 for chnk in group:
1702 yield chnk
1702 yield chnk
1703
1703
1704 # These are no longer needed, dereference and toss the memory for
1704 # These are no longer needed, dereference and toss the memory for
1705 # them.
1705 # them.
1706 msng_mnfst_lst = None
1706 msng_mnfst_lst = None
1707 msng_mnfst_set.clear()
1707 msng_mnfst_set.clear()
1708
1708
1709 changedfiles = changedfiles.keys()
1709 changedfiles = changedfiles.keys()
1710 changedfiles.sort()
1710 changedfiles.sort()
1711 # Go through all our files in order sorted by name.
1711 # Go through all our files in order sorted by name.
1712 for fname in changedfiles:
1712 for fname in changedfiles:
1713 filerevlog = self.file(fname)
1713 filerevlog = self.file(fname)
1714 # Toss out the filenodes that the recipient isn't really
1714 # Toss out the filenodes that the recipient isn't really
1715 # missing.
1715 # missing.
1716 if msng_filenode_set.has_key(fname):
1716 if msng_filenode_set.has_key(fname):
1717 prune_filenodes(fname, filerevlog)
1717 prune_filenodes(fname, filerevlog)
1718 msng_filenode_lst = msng_filenode_set[fname].keys()
1718 msng_filenode_lst = msng_filenode_set[fname].keys()
1719 else:
1719 else:
1720 msng_filenode_lst = []
1720 msng_filenode_lst = []
1721 # If any filenodes are left, generate the group for them,
1721 # If any filenodes are left, generate the group for them,
1722 # otherwise don't bother.
1722 # otherwise don't bother.
1723 if len(msng_filenode_lst) > 0:
1723 if len(msng_filenode_lst) > 0:
1724 yield changegroup.genchunk(fname)
1724 yield changegroup.genchunk(fname)
1725 # Sort the filenodes by their revision #
1725 # Sort the filenodes by their revision #
1726 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1726 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1727 # Create a group generator and only pass in a changenode
1727 # Create a group generator and only pass in a changenode
1728 # lookup function, as we don't need to collect any information
1728 # lookup function, as we don't need to collect any information
1729 # from filenodes.
1729 # from filenodes.
1730 group = filerevlog.group(msng_filenode_lst,
1730 group = filerevlog.group(msng_filenode_lst,
1731 lookup_filenode_link_func(fname))
1731 lookup_filenode_link_func(fname))
1732 for chnk in group:
1732 for chnk in group:
1733 yield chnk
1733 yield chnk
1734 if msng_filenode_set.has_key(fname):
1734 if msng_filenode_set.has_key(fname):
1735 # Don't need this anymore, toss it to free memory.
1735 # Don't need this anymore, toss it to free memory.
1736 del msng_filenode_set[fname]
1736 del msng_filenode_set[fname]
1737 # Signal that no more groups are left.
1737 # Signal that no more groups are left.
1738 yield changegroup.closechunk()
1738 yield changegroup.closechunk()
1739
1739
1740 if msng_cl_lst:
1740 if msng_cl_lst:
1741 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1741 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1742
1742
1743 return util.chunkbuffer(gengroup())
1743 return util.chunkbuffer(gengroup())
1744
1744
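The stream gengroup yields has a fixed shape: changelog chunks, a terminator, manifest chunks, a terminator, then one group per changed file introduced by a filename chunk, and a final terminator ending the stream. Each chunk is framed by changegroup.genchunk with a 4-byte big-endian length that counts the prefix itself, and a zero length closes a group. The framing alone, with fake payloads:

    import struct

    def genchunk(data):
        # length prefix includes its own 4 bytes, as genchunk does here
        return struct.pack(">l", len(data) + 4) + data

    def closechunk():
        return struct.pack(">l", 0)       # zero length ends a group

    stream = genchunk(b"cl-chunk") + closechunk()        # changelog
    stream += genchunk(b"mnfst-chunk") + closechunk()    # manifests
    stream += genchunk(b"somefile") + genchunk(b"fn-chunk") + closechunk()
    stream += closechunk()                               # no more files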
1745 def changegroup(self, basenodes, source):
1745 def changegroup(self, basenodes, source):
1746 """Generate a changegroup of all nodes that we have that a recipient
1746 """Generate a changegroup of all nodes that we have that a recipient
1747 doesn't.
1747 doesn't.
1748
1748
1749 This is much easier than the previous function as we can assume that
1749 This is much easier than the previous function as we can assume that
1750 the recipient has any changenode we aren't sending them."""
1750 the recipient has any changenode we aren't sending them."""
1751
1751
1752 self.hook('preoutgoing', throw=True, source=source)
1752 self.hook('preoutgoing', throw=True, source=source)
1753
1753
1754 cl = self.changelog
1754 cl = self.changelog
1755 nodes = cl.nodesbetween(basenodes, None)[0]
1755 nodes = cl.nodesbetween(basenodes, None)[0]
1756 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1756 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1757 self.changegroupinfo(nodes)
1757 self.changegroupinfo(nodes)
1758
1758
1759 def identity(x):
1759 def identity(x):
1760 return x
1760 return x
1761
1761
1762 def gennodelst(revlog):
1762 def gennodelst(revlog):
1763 for r in xrange(0, revlog.count()):
1763 for r in xrange(0, revlog.count()):
1764 n = revlog.node(r)
1764 n = revlog.node(r)
1765 if revlog.linkrev(n) in revset:
1765 if revlog.linkrev(n) in revset:
1766 yield n
1766 yield n
1767
1767
1768 def changed_file_collector(changedfileset):
1768 def changed_file_collector(changedfileset):
1769 def collect_changed_files(clnode):
1769 def collect_changed_files(clnode):
1770 c = cl.read(clnode)
1770 c = cl.read(clnode)
1771 for fname in c[3]:
1771 for fname in c[3]:
1772 changedfileset[fname] = 1
1772 changedfileset[fname] = 1
1773 return collect_changed_files
1773 return collect_changed_files
1774
1774
1775 def lookuprevlink_func(revlog):
1775 def lookuprevlink_func(revlog):
1776 def lookuprevlink(n):
1776 def lookuprevlink(n):
1777 return cl.node(revlog.linkrev(n))
1777 return cl.node(revlog.linkrev(n))
1778 return lookuprevlink
1778 return lookuprevlink
1779
1779
1780 def gengroup():
1780 def gengroup():
1781 # construct a list of all changed files
1781 # construct a list of all changed files
1782 changedfiles = {}
1782 changedfiles = {}
1783
1783
1784 for chnk in cl.group(nodes, identity,
1784 for chnk in cl.group(nodes, identity,
1785 changed_file_collector(changedfiles)):
1785 changed_file_collector(changedfiles)):
1786 yield chnk
1786 yield chnk
1787 changedfiles = changedfiles.keys()
1787 changedfiles = changedfiles.keys()
1788 changedfiles.sort()
1788 changedfiles.sort()
1789
1789
1790 mnfst = self.manifest
1790 mnfst = self.manifest
1791 nodeiter = gennodelst(mnfst)
1791 nodeiter = gennodelst(mnfst)
1792 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1792 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1793 yield chnk
1793 yield chnk
1794
1794
1795 for fname in changedfiles:
1795 for fname in changedfiles:
1796 filerevlog = self.file(fname)
1796 filerevlog = self.file(fname)
1797 nodeiter = gennodelst(filerevlog)
1797 nodeiter = gennodelst(filerevlog)
1798 nodeiter = list(nodeiter)
1798 nodeiter = list(nodeiter)
1799 if nodeiter:
1799 if nodeiter:
1800 yield changegroup.genchunk(fname)
1800 yield changegroup.genchunk(fname)
1801 lookup = lookuprevlink_func(filerevlog)
1801 lookup = lookuprevlink_func(filerevlog)
1802 for chnk in filerevlog.group(nodeiter, lookup):
1802 for chnk in filerevlog.group(nodeiter, lookup):
1803 yield chnk
1803 yield chnk
1804
1804
1805 yield changegroup.closechunk()
1805 yield changegroup.closechunk()
1806
1806
1807 if nodes:
1807 if nodes:
1808 self.hook('outgoing', node=hex(nodes[0]), source=source)
1808 self.hook('outgoing', node=hex(nodes[0]), source=source)
1809
1809
1810 return util.chunkbuffer(gengroup())
1810 return util.chunkbuffer(gengroup())
1811
1811
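gennodelst above needs no pruning pass: it walks a revlog in storage order and keeps exactly the entries whose linkrev falls inside the changesets being shipped, which by construction is everything the recipient lacks. The filter in isolation, with a toy revlog as parallel lists:

    nodes   = ['m0', 'm1', 'm2', 'm3']
    linkrev = [0, 2, 3, 5]           # changelog rev each entry belongs to
    revset  = {2: None, 3: None}     # changesets going out

    wanted = [n for n, lr in zip(nodes, linkrev) if lr in revset]
    assert wanted == ['m1', 'm2']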
1812 def addchangegroup(self, source, srctype, url):
1812 def addchangegroup(self, source, srctype, url):
1813 """add changegroup to repo.
1813 """add changegroup to repo.
1814
1814
1815 return values:
1815 return values:
1816 - nothing changed or no source: 0
1816 - nothing changed or no source: 0
1817 - more heads than before: 1+added heads (2..n)
1817 - more heads than before: 1+added heads (2..n)
1818 - fewer heads than before: -1-removed heads (-2..-n)
1818 - fewer heads than before: -1-removed heads (-2..-n)
1819 - number of heads stays the same: 1
1819 - number of heads stays the same: 1
1820 """
1820 """
1821 def csmap(x):
1821 def csmap(x):
1822 self.ui.debug(_("add changeset %s\n") % short(x))
1822 self.ui.debug(_("add changeset %s\n") % short(x))
1823 return cl.count()
1823 return cl.count()
1824
1824
1825 def revmap(x):
1825 def revmap(x):
1826 return cl.rev(x)
1826 return cl.rev(x)
1827
1827
1828 if not source:
1828 if not source:
1829 return 0
1829 return 0
1830
1830
1831 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1831 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1832
1832
1833 changesets = files = revisions = 0
1833 changesets = files = revisions = 0
1834
1834
1835 # write changelog data to temp files so concurrent readers will not see
1835 # write changelog data to temp files so concurrent readers will not see
1836 # an inconsistent view
1836 # an inconsistent view
1837 cl = self.changelog
1837 cl = self.changelog
1838 cl.delayupdate()
1838 cl.delayupdate()
1839 oldheads = len(cl.heads())
1839 oldheads = len(cl.heads())
1840
1840
1841 tr = self.transaction()
1841 tr = self.transaction()
1842 try:
1842 try:
1843 trp = weakref.proxy(tr)
1843 trp = weakref.proxy(tr)
1844 # pull off the changeset group
1844 # pull off the changeset group
1845 self.ui.status(_("adding changesets\n"))
1845 self.ui.status(_("adding changesets\n"))
1846 cor = cl.count() - 1
1846 cor = cl.count() - 1
1847 chunkiter = changegroup.chunkiter(source)
1847 chunkiter = changegroup.chunkiter(source)
1848 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1848 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1849 raise util.Abort(_("received changelog group is empty"))
1849 raise util.Abort(_("received changelog group is empty"))
1850 cnr = cl.count() - 1
1850 cnr = cl.count() - 1
1851 changesets = cnr - cor
1851 changesets = cnr - cor
1852
1852
1853 # pull off the manifest group
1853 # pull off the manifest group
1854 self.ui.status(_("adding manifests\n"))
1854 self.ui.status(_("adding manifests\n"))
1855 chunkiter = changegroup.chunkiter(source)
1855 chunkiter = changegroup.chunkiter(source)
1856 # no need to check for empty manifest group here:
1856 # no need to check for empty manifest group here:
1857 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1857 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1858 # no new manifest will be created and the manifest group will
1858 # no new manifest will be created and the manifest group will
1859 # be empty during the pull
1859 # be empty during the pull
1860 self.manifest.addgroup(chunkiter, revmap, trp)
1860 self.manifest.addgroup(chunkiter, revmap, trp)
1861
1861
1862 # process the files
1862 # process the files
1863 self.ui.status(_("adding file changes\n"))
1863 self.ui.status(_("adding file changes\n"))
1864 while 1:
1864 while 1:
1865 f = changegroup.getchunk(source)
1865 f = changegroup.getchunk(source)
1866 if not f:
1866 if not f:
1867 break
1867 break
1868 self.ui.debug(_("adding %s revisions\n") % f)
1868 self.ui.debug(_("adding %s revisions\n") % f)
1869 fl = self.file(f)
1869 fl = self.file(f)
1870 o = fl.count()
1870 o = fl.count()
1871 chunkiter = changegroup.chunkiter(source)
1871 chunkiter = changegroup.chunkiter(source)
1872 if fl.addgroup(chunkiter, revmap, trp) is None:
1872 if fl.addgroup(chunkiter, revmap, trp) is None:
1873 raise util.Abort(_("received file revlog group is empty"))
1873 raise util.Abort(_("received file revlog group is empty"))
1874 revisions += fl.count() - o
1874 revisions += fl.count() - o
1875 files += 1
1875 files += 1
1876
1876
1877 # make changelog see real files again
1877 # make changelog see real files again
1878 cl.finalize(trp)
1878 cl.finalize(trp)
1879
1879
1880 newheads = len(self.changelog.heads())
1880 newheads = len(self.changelog.heads())
1881 heads = ""
1881 heads = ""
1882 if oldheads and newheads != oldheads:
1882 if oldheads and newheads != oldheads:
1883 heads = _(" (%+d heads)") % (newheads - oldheads)
1883 heads = _(" (%+d heads)") % (newheads - oldheads)
1884
1884
1885 self.ui.status(_("added %d changesets"
1885 self.ui.status(_("added %d changesets"
1886 " with %d changes to %d files%s\n")
1886 " with %d changes to %d files%s\n")
1887 % (changesets, revisions, files, heads))
1887 % (changesets, revisions, files, heads))
1888
1888
1889 if changesets > 0:
1889 if changesets > 0:
1890 self.hook('pretxnchangegroup', throw=True,
1890 self.hook('pretxnchangegroup', throw=True,
1891 node=hex(self.changelog.node(cor+1)), source=srctype,
1891 node=hex(self.changelog.node(cor+1)), source=srctype,
1892 url=url)
1892 url=url)
1893
1893
1894 tr.close()
1894 tr.close()
1895 finally:
1895 finally:
1896 del tr
1896 del tr
1897
1897
1898 if changesets > 0:
1898 if changesets > 0:
1899 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1899 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1900 source=srctype, url=url)
1900 source=srctype, url=url)
1901
1901
1902 for i in xrange(cor + 1, cnr + 1):
1902 for i in xrange(cor + 1, cnr + 1):
1903 self.hook("incoming", node=hex(self.changelog.node(i)),
1903 self.hook("incoming", node=hex(self.changelog.node(i)),
1904 source=srctype, url=url)
1904 source=srctype, url=url)
1905
1905
1906 # never return 0 here:
1906 # never return 0 here:
1907 if newheads < oldheads:
1907 if newheads < oldheads:
1908 return newheads - oldheads - 1
1908 return newheads - oldheads - 1
1909 else:
1909 else:
1910 return newheads - oldheads + 1
1910 return newheads - oldheads + 1
1911
1911
1912
1912
1913 def stream_in(self, remote):
1913 def stream_in(self, remote):
1914 fp = remote.stream_out()
1914 fp = remote.stream_out()
1915 l = fp.readline()
1915 l = fp.readline()
1916 try:
1916 try:
1917 resp = int(l)
1917 resp = int(l)
1918 except ValueError:
1918 except ValueError:
1919 raise util.UnexpectedOutput(
1919 raise util.UnexpectedOutput(
1920 _('Unexpected response from remote server:'), l)
1920 _('Unexpected response from remote server:'), l)
1921 if resp == 1:
1921 if resp == 1:
1922 raise util.Abort(_('operation forbidden by server'))
1922 raise util.Abort(_('operation forbidden by server'))
1923 elif resp == 2:
1923 elif resp == 2:
1924 raise util.Abort(_('locking the remote repository failed'))
1924 raise util.Abort(_('locking the remote repository failed'))
1925 elif resp != 0:
1925 elif resp != 0:
1926 raise util.Abort(_('the server sent an unknown error code'))
1926 raise util.Abort(_('the server sent an unknown error code'))
1927 self.ui.status(_('streaming all changes\n'))
1927 self.ui.status(_('streaming all changes\n'))
1928 l = fp.readline()
1928 l = fp.readline()
1929 try:
1929 try:
1930 total_files, total_bytes = map(int, l.split(' ', 1))
1930 total_files, total_bytes = map(int, l.split(' ', 1))
1931 except (ValueError, TypeError):
1931 except (ValueError, TypeError):
1932 raise util.UnexpectedOutput(
1932 raise util.UnexpectedOutput(
1933 _('Unexpected response from remote server:'), l)
1933 _('Unexpected response from remote server:'), l)
1934 self.ui.status(_('%d files to transfer, %s of data\n') %
1934 self.ui.status(_('%d files to transfer, %s of data\n') %
1935 (total_files, util.bytecount(total_bytes)))
1935 (total_files, util.bytecount(total_bytes)))
1936 start = time.time()
1936 start = time.time()
1937 for i in xrange(total_files):
1937 for i in xrange(total_files):
1938 # XXX doesn't support '\n' or '\r' in filenames
1938 # XXX doesn't support '\n' or '\r' in filenames
1939 l = fp.readline()
1939 l = fp.readline()
1940 try:
1940 try:
1941 name, size = l.split('\0', 1)
1941 name, size = l.split('\0', 1)
1942 size = int(size)
1942 size = int(size)
1943 except (ValueError, TypeError):
1943 except (ValueError, TypeError):
1944 raise util.UnexpectedOutput(
1944 raise util.UnexpectedOutput(
1945 _('Unexpected response from remote server:'), l)
1945 _('Unexpected response from remote server:'), l)
1946 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1946 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1947 ofp = self.sopener(name, 'w')
1947 ofp = self.sopener(name, 'w')
1948 for chunk in util.filechunkiter(fp, limit=size):
1948 for chunk in util.filechunkiter(fp, limit=size):
1949 ofp.write(chunk)
1949 ofp.write(chunk)
1950 ofp.close()
1950 ofp.close()
1951 elapsed = time.time() - start
1951 elapsed = time.time() - start
1952 if elapsed <= 0:
1952 if elapsed <= 0:
1953 elapsed = 0.001
1953 elapsed = 0.001
1954 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1954 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1955 (util.bytecount(total_bytes), elapsed,
1955 (util.bytecount(total_bytes), elapsed,
1956 util.bytecount(total_bytes / elapsed)))
1956 util.bytecount(total_bytes / elapsed)))
1957 self.invalidate()
1957 self.invalidate()
1958 return len(self.heads()) + 1
1958 return len(self.heads()) + 1
1959
1959
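The stream format consumed above is line-oriented: a status code line, a 'total_files total_bytes' line, then for each file a name and size separated by a NUL byte and terminated by a newline, followed by exactly size bytes of raw store data. A minimal parser over an in-memory stream, assuming that framing:

    from io import BytesIO

    def parse_stream(fp):
        assert int(fp.readline()) == 0               # 0 means OK
        total_files, total_bytes = map(int, fp.readline().split(b' ', 1))
        files = {}
        for _ in range(total_files):
            name, size = fp.readline().split(b'\0', 1)
            files[name] = fp.read(int(size))         # raw revlog bytes
        return files

    fp = BytesIO(b'0\n1 5\ndata/x.i\x005\nhello')
    assert parse_stream(fp) == {b'data/x.i': b'hello'}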
1960 def clone(self, remote, heads=[], stream=False):
1960 def clone(self, remote, heads=[], stream=False):
1961 '''clone remote repository.
1961 '''clone remote repository.
1962
1962
1963 keyword arguments:
1963 keyword arguments:
1964 heads: list of revs to clone (forces use of pull)
1964 heads: list of revs to clone (forces use of pull)
1965 stream: use streaming clone if possible'''
1965 stream: use streaming clone if possible'''
1966
1966
1967 # now, all clients that can request uncompressed clones can
1967 # now, all clients that can request uncompressed clones can
1968 # read repo formats supported by all servers that can serve
1968 # read repo formats supported by all servers that can serve
1969 # them.
1969 # them.
1970
1970
1971 # if revlog format changes, client will have to check version
1971 # if revlog format changes, client will have to check version
1972 # and format flags on "stream" capability, and use
1972 # and format flags on "stream" capability, and use
1973 # uncompressed only if compatible.
1973 # uncompressed only if compatible.
1974
1974
1975 if stream and not heads and remote.capable('stream'):
1975 if stream and not heads and remote.capable('stream'):
1976 return self.stream_in(remote)
1976 return self.stream_in(remote)
1977 return self.pull(remote, heads)
1977 return self.pull(remote, heads)
1978
1978
1979 # used to avoid circular references so destructors work
1979 # used to avoid circular references so destructors work
1980 def aftertrans(files):
1980 def aftertrans(files):
1981 renamefiles = [tuple(t) for t in files]
1981 renamefiles = [tuple(t) for t in files]
1982 def a():
1982 def a():
1983 for src, dest in renamefiles:
1983 for src, dest in renamefiles:
1984 util.rename(src, dest)
1984 util.rename(src, dest)
1985 return a
1985 return a
1986
1986
1987 def instance(ui, path, create):
1987 def instance(ui, path, create):
1988 return localrepository(ui, util.drop_scheme('file', path), create)
1988 return localrepository(ui, util.drop_scheme('file', path), create)
1989
1989
1990 def islocal(path):
1990 def islocal(path):
1991 return True
1991 return True
@@ -1,225 +1,225 b''
1 # sshrepo.py - ssh repository proxy class for mercurial
1 # sshrepo.py - ssh repository proxy class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from remoterepo import *
9 from remoterepo import *
10 from i18n import _
10 from i18n import _
11 import repo, os, re, stat, util
11 import repo, os, re, stat, util
12
12
13 class sshrepository(remoterepository):
13 class sshrepository(remoterepository):
14 def __init__(self, ui, path, create=0):
14 def __init__(self, ui, path, create=0):
15 self._url = path
15 self._url = path
16 self.ui = ui
16 self.ui = ui
17
17
18 m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
18 m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
19 if not m:
19 if not m:
20 self.raise_(repo.RepoError(_("couldn't parse location %s") % path))
20 self.raise_(repo.RepoError(_("couldn't parse location %s") % path))
21
21
22 self.user = m.group(2)
22 self.user = m.group(2)
23 self.host = m.group(3)
23 self.host = m.group(3)
24 self.port = m.group(5)
24 self.port = m.group(5)
25 self.path = m.group(7) or "."
25 self.path = m.group(7) or "."
26
26
27 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
27 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
28 args = self.port and ("%s -p %s") % (args, self.port) or args
28 args = self.port and ("%s -p %s") % (args, self.port) or args
29
29
30 sshcmd = self.ui.config("ui", "ssh", "ssh")
30 sshcmd = self.ui.config("ui", "ssh", "ssh")
31 remotecmd = self.ui.config("ui", "remotecmd", "hg")
31 remotecmd = self.ui.config("ui", "remotecmd", "hg")
32
32
33 if create:
33 if create:
34 cmd = '%s %s "%s init %s"'
34 cmd = '%s %s "%s init %s"'
35 cmd = cmd % (sshcmd, args, remotecmd, self.path)
35 cmd = cmd % (sshcmd, args, remotecmd, self.path)
36
36
37 ui.note('running %s\n' % cmd)
37 ui.note('running %s\n' % cmd)
38 res = os.system(cmd)
38 res = os.system(cmd)
39 if res != 0:
39 if res != 0:
40 self.raise_(repo.RepoError(_("could not create remote repo")))
40 self.raise_(repo.RepoError(_("could not create remote repo")))
41
41
42 self.validate_repo(ui, sshcmd, args, remotecmd)
42 self.validate_repo(ui, sshcmd, args, remotecmd)
43
43
44 def url(self):
44 def url(self):
45 return self._url
45 return self._url
46
46
47 def validate_repo(self, ui, sshcmd, args, remotecmd):
47 def validate_repo(self, ui, sshcmd, args, remotecmd):
48 # clean up previous run
48 # clean up previous run
49 self.cleanup()
49 self.cleanup()
50
50
51 cmd = '%s %s "%s -R %s serve --stdio"'
51 cmd = '%s %s "%s -R %s serve --stdio"'
52 cmd = cmd % (sshcmd, args, remotecmd, self.path)
52 cmd = cmd % (sshcmd, args, remotecmd, self.path)
53
53
54 ui.note('running %s\n' % cmd)
54 ui.note('running %s\n' % cmd)
55 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
55 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
56
56
57 # skip any noise generated by remote shell
57 # skip any noise generated by remote shell
58 self.do_cmd("hello")
58 self.do_cmd("hello")
59 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
59 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
60 lines = ["", "dummy"]
60 lines = ["", "dummy"]
61 max_noise = 500
61 max_noise = 500
62 while lines[-1] and max_noise:
62 while lines[-1] and max_noise:
63 l = r.readline()
63 l = r.readline()
64 self.readerr()
64 self.readerr()
65 if lines[-1] == "1\n" and l == "\n":
65 if lines[-1] == "1\n" and l == "\n":
66 break
66 break
67 if l:
67 if l:
68 ui.debug(_("remote: "), l)
68 ui.debug(_("remote: "), l)
69 lines.append(l)
69 lines.append(l)
70 max_noise -= 1
70 max_noise -= 1
71 else:
71 else:
72 self.raise_(repo.RepoError(_("no suitable response from remote hg")))
72 self.raise_(repo.RepoError(_("no suitable response from remote hg")))
73
73
74 self.capabilities = ()
74 self.capabilities = util.set()
75 lines.reverse()
75 lines.reverse()
76 for l in lines:
76 for l in lines:
77 if l.startswith("capabilities:"):
77 if l.startswith("capabilities:"):
78 self.capabilities = l[:-1].split(":")[1].split()
78 self.capabilities.update(l[:-1].split(":")[1].split())
79 break
79 break
80
80
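This hunk is the point of the changeset: capabilities becomes a mutable set (util.set being a compatibility alias for a set type on older Pythons) so later code can add capabilities after the handshake instead of rebuilding a tuple. The parsing itself is unchanged; in isolation:

    line = "capabilities: unbundle lookup changegroupsubset\n"
    caps = set()
    caps.update(line[:-1].split(":")[1].split())
    assert "unbundle" in caps
    caps.add("stream")       # now possible: the container is mutable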
81 def readerr(self):
81 def readerr(self):
82 while 1:
82 while 1:
83 size = util.fstat(self.pipee).st_size
83 size = util.fstat(self.pipee).st_size
84 if size == 0: break
84 if size == 0: break
85 l = self.pipee.readline()
85 l = self.pipee.readline()
86 if not l: break
86 if not l: break
87 self.ui.status(_("remote: "), l)
87 self.ui.status(_("remote: "), l)
88
88
89 def raise_(self, exception):
89 def raise_(self, exception):
90 self.cleanup()
90 self.cleanup()
91 raise exception
91 raise exception
92
92
93 def cleanup(self):
93 def cleanup(self):
94 try:
94 try:
95 self.pipeo.close()
95 self.pipeo.close()
96 self.pipei.close()
96 self.pipei.close()
97 # read the error descriptor until EOF
97 # read the error descriptor until EOF
98 for l in self.pipee:
98 for l in self.pipee:
99 self.ui.status(_("remote: "), l)
99 self.ui.status(_("remote: "), l)
100 self.pipee.close()
100 self.pipee.close()
101 except:
101 except:
102 pass
102 pass
103
103
104 __del__ = cleanup
104 __del__ = cleanup
105
105
106 def do_cmd(self, cmd, **args):
106 def do_cmd(self, cmd, **args):
107 self.ui.debug(_("sending %s command\n") % cmd)
107 self.ui.debug(_("sending %s command\n") % cmd)
108 self.pipeo.write("%s\n" % cmd)
108 self.pipeo.write("%s\n" % cmd)
109 for k, v in args.items():
109 for k, v in args.items():
110 self.pipeo.write("%s %d\n" % (k, len(v)))
110 self.pipeo.write("%s %d\n" % (k, len(v)))
111 self.pipeo.write(v)
111 self.pipeo.write(v)
112 self.pipeo.flush()
112 self.pipeo.flush()
113
113
114 return self.pipei
114 return self.pipei
115
115
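do_cmd defines the client half of the stdio wire format: the command name on its own line, then for each argument a 'key length' line followed by exactly length bytes of raw value. An encoder for that framing (encode_cmd is illustrative; the real code writes straight to the pipe):

    def encode_cmd(cmd, **args):
        out = "%s\n" % cmd
        for k, v in args.items():
            out += "%s %d\n%s" % (k, len(v), v)   # key, size, raw value
        return out

    assert encode_cmd("lookup", key="tip") == "lookup\nkey 3\ntip"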
116 def call(self, cmd, **args):
116 def call(self, cmd, **args):
117 r = self.do_cmd(cmd, **args)
117 r = self.do_cmd(cmd, **args)
118 l = r.readline()
118 l = r.readline()
119 self.readerr()
119 self.readerr()
120 try:
120 try:
121 l = int(l)
121 l = int(l)
122 except:
122 except:
123 self.raise_(util.UnexpectedOutput(_("unexpected response:"), l))
123 self.raise_(util.UnexpectedOutput(_("unexpected response:"), l))
124 return r.read(l)
124 return r.read(l)
125
125
126 def lock(self):
126 def lock(self):
127 self.call("lock")
127 self.call("lock")
128 return remotelock(self)
128 return remotelock(self)
129
129
130 def unlock(self):
130 def unlock(self):
131 self.call("unlock")
131 self.call("unlock")
132
132
133 def lookup(self, key):
133 def lookup(self, key):
134 d = self.call("lookup", key=key)
134 d = self.call("lookup", key=key)
135 success, data = d[:-1].split(" ", 1)
135 success, data = d[:-1].split(" ", 1)
136 if int(success):
136 if int(success):
137 return bin(data)
137 return bin(data)
138 else:
138 else:
139 self.raise_(repo.RepoError(data))
139 self.raise_(repo.RepoError(data))
140
140
141 def heads(self):
141 def heads(self):
142 d = self.call("heads")
142 d = self.call("heads")
143 try:
143 try:
144 return map(bin, d[:-1].split(" "))
144 return map(bin, d[:-1].split(" "))
145 except:
145 except:
146 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
146 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
147
147
148 def branches(self, nodes):
148 def branches(self, nodes):
149 n = " ".join(map(hex, nodes))
149 n = " ".join(map(hex, nodes))
150 d = self.call("branches", nodes=n)
150 d = self.call("branches", nodes=n)
151 try:
151 try:
152 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
152 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
153 return br
153 return br
154 except:
154 except:
155 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
155 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
156
156
157 def between(self, pairs):
157 def between(self, pairs):
158 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
158 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
159 d = self.call("between", pairs=n)
159 d = self.call("between", pairs=n)
160 try:
160 try:
161 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
161 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
162 return p
162 return p
163 except:
163 except:
164 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
164 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
165
165
166 def changegroup(self, nodes, kind):
166 def changegroup(self, nodes, kind):
167 n = " ".join(map(hex, nodes))
167 n = " ".join(map(hex, nodes))
168 return self.do_cmd("changegroup", roots=n)
168 return self.do_cmd("changegroup", roots=n)
169
169
170 def changegroupsubset(self, bases, heads, kind):
170 def changegroupsubset(self, bases, heads, kind):
171 bases = " ".join(map(hex, bases))
171 bases = " ".join(map(hex, bases))
172 heads = " ".join(map(hex, heads))
172 heads = " ".join(map(hex, heads))
173 return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
173 return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
174
174
175 def unbundle(self, cg, heads, source):
175 def unbundle(self, cg, heads, source):
176 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
176 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
177 if d:
177 if d:
178 # remote may send "unsynced changes"
178 # remote may send "unsynced changes"
179 self.raise_(repo.RepoError(_("push refused: %s") % d))
179 self.raise_(repo.RepoError(_("push refused: %s") % d))
180
180
181 while 1:
181 while 1:
182 d = cg.read(4096)
182 d = cg.read(4096)
183 if not d: break
183 if not d: break
184 self.pipeo.write(str(len(d)) + '\n')
184 self.pipeo.write(str(len(d)) + '\n')
185 self.pipeo.write(d)
185 self.pipeo.write(d)
186 self.readerr()
186 self.readerr()
187
187
188 self.pipeo.write('0\n')
188 self.pipeo.write('0\n')
189 self.pipeo.flush()
189 self.pipeo.flush()
190
190
191 self.readerr()
191 self.readerr()
192 l = int(self.pipei.readline())
192 l = int(self.pipei.readline())
193 r = self.pipei.read(l)
193 r = self.pipei.read(l)
194 if r:
194 if r:
195 # remote may send "unsynced changes"
195 # remote may send "unsynced changes"
196 self.raise_(repo.RepoError(_("push failed: %s") % r))
196 self.raise_(repo.RepoError(_("push failed: %s") % r))
197
197
198 self.readerr()
198 self.readerr()
199 l = int(self.pipei.readline())
199 l = int(self.pipei.readline())
200 r = self.pipei.read(l)
200 r = self.pipei.read(l)
201 return int(r)
201 return int(r)
202
202
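unbundle streams the bundle in sized chunks: a decimal length on its own line, then that many raw bytes, repeated, with a bare '0' line marking the end; the server then replies twice, an empty first payload signalling success. The upload framing in isolation:

    def frame_bundle(chunks):
        out = b""
        for d in chunks:
            out += str(len(d)).encode() + b"\n" + d   # size line, then data
        return out + b"0\n"                           # end-of-bundle marker

    assert frame_bundle([b"abc", b"de"]) == b"3\nabc2\nde0\n"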
203 def addchangegroup(self, cg, source, url):
203 def addchangegroup(self, cg, source, url):
204 d = self.call("addchangegroup")
204 d = self.call("addchangegroup")
205 if d:
205 if d:
206 self.raise_(repo.RepoError(_("push refused: %s") % d))
206 self.raise_(repo.RepoError(_("push refused: %s") % d))
207 while 1:
207 while 1:
208 d = cg.read(4096)
208 d = cg.read(4096)
209 if not d: break
209 if not d: break
210 self.pipeo.write(d)
210 self.pipeo.write(d)
211 self.readerr()
211 self.readerr()
212
212
213 self.pipeo.flush()
213 self.pipeo.flush()
214
214
215 self.readerr()
215 self.readerr()
216 l = int(self.pipei.readline())
216 l = int(self.pipei.readline())
217 r = self.pipei.read(l)
217 r = self.pipei.read(l)
218 if not r:
218 if not r:
219 return 1
219 return 1
220 return int(r)
220 return int(r)
221
221
222 def stream_out(self):
222 def stream_out(self):
223 return self.do_cmd('stream_out')
223 return self.do_cmd('stream_out')
224
224
225 instance = sshrepository
225 instance = sshrepository