##// END OF EJS Templates
extend network protocol to stop clients from locking servers...
Vadim Gelfer -
r2439:e8c4f3d3 default
parent child Browse files
Show More
@@ -1,238 +1,242 b''
1 # httprepo.py - HTTP repository proxy classes for mercurial
1 # httprepo.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from remoterepo import *
9 from remoterepo import *
10 from i18n import gettext as _
10 from i18n import gettext as _
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "hg os urllib urllib2 urlparse zlib util httplib")
12 demandload(globals(), "hg os urllib urllib2 urlparse zlib util httplib")
13 demandload(globals(), "keepalive")
13 demandload(globals(), "keepalive")
14
14
15 class passwordmgr(urllib2.HTTPPasswordMgr):
15 class passwordmgr(urllib2.HTTPPasswordMgr):
16 def __init__(self, ui):
16 def __init__(self, ui):
17 urllib2.HTTPPasswordMgr.__init__(self)
17 urllib2.HTTPPasswordMgr.__init__(self)
18 self.ui = ui
18 self.ui = ui
19
19
20 def find_user_password(self, realm, authuri):
20 def find_user_password(self, realm, authuri):
21 authinfo = urllib2.HTTPPasswordMgr.find_user_password(
21 authinfo = urllib2.HTTPPasswordMgr.find_user_password(
22 self, realm, authuri)
22 self, realm, authuri)
23 if authinfo != (None, None):
23 if authinfo != (None, None):
24 return authinfo
24 return authinfo
25
25
26 if not ui.interactive:
26 if not ui.interactive:
27 raise util.Abort(_('http authorization required'))
27 raise util.Abort(_('http authorization required'))
28
28
29 self.ui.write(_("http authorization required\n"))
29 self.ui.write(_("http authorization required\n"))
30 self.ui.status(_("realm: %s\n") % realm)
30 self.ui.status(_("realm: %s\n") % realm)
31 user = self.ui.prompt(_("user:"), default=None)
31 user = self.ui.prompt(_("user:"), default=None)
32 passwd = self.ui.getpass()
32 passwd = self.ui.getpass()
33
33
34 self.add_password(realm, authuri, user, passwd)
34 self.add_password(realm, authuri, user, passwd)
35 return (user, passwd)
35 return (user, passwd)
36
36
37 def netlocsplit(netloc):
37 def netlocsplit(netloc):
38 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
38 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
39
39
40 a = netloc.find('@')
40 a = netloc.find('@')
41 if a == -1:
41 if a == -1:
42 user, passwd = None, None
42 user, passwd = None, None
43 else:
43 else:
44 userpass, netloc = netloc[:a], netloc[a+1:]
44 userpass, netloc = netloc[:a], netloc[a+1:]
45 c = userpass.find(':')
45 c = userpass.find(':')
46 if c == -1:
46 if c == -1:
47 user, passwd = urllib.unquote(userpass), None
47 user, passwd = urllib.unquote(userpass), None
48 else:
48 else:
49 user = urllib.unquote(userpass[:c])
49 user = urllib.unquote(userpass[:c])
50 passwd = urllib.unquote(userpass[c+1:])
50 passwd = urllib.unquote(userpass[c+1:])
51 c = netloc.find(':')
51 c = netloc.find(':')
52 if c == -1:
52 if c == -1:
53 host, port = netloc, None
53 host, port = netloc, None
54 else:
54 else:
55 host, port = netloc[:c], netloc[c+1:]
55 host, port = netloc[:c], netloc[c+1:]
56 return host, port, user, passwd
56 return host, port, user, passwd
57
57
58 def netlocunsplit(host, port, user=None, passwd=None):
58 def netlocunsplit(host, port, user=None, passwd=None):
59 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
59 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
60 if port:
60 if port:
61 hostport = host + ':' + port
61 hostport = host + ':' + port
62 else:
62 else:
63 hostport = host
63 hostport = host
64 if user:
64 if user:
65 if passwd:
65 if passwd:
66 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
66 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
67 else:
67 else:
68 userpass = urllib.quote(user)
68 userpass = urllib.quote(user)
69 return userpass + '@' + hostport
69 return userpass + '@' + hostport
70 return hostport
70 return hostport
71
71
72 class httprepository(remoterepository):
72 class httprepository(remoterepository):
73 def __init__(self, ui, path):
73 def __init__(self, ui, path):
74 self.capabilities = ()
74 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
75 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
75 if query or frag:
76 if query or frag:
76 raise util.Abort(_('unsupported URL component: "%s"') %
77 raise util.Abort(_('unsupported URL component: "%s"') %
77 (query or frag))
78 (query or frag))
78 if not urlpath: urlpath = '/'
79 if not urlpath: urlpath = '/'
79 host, port, user, passwd = netlocsplit(netloc)
80 host, port, user, passwd = netlocsplit(netloc)
80
81
81 # urllib cannot handle URLs with embedded user or passwd
82 # urllib cannot handle URLs with embedded user or passwd
82 self.url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
83 self.url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
83 urlpath, '', ''))
84 urlpath, '', ''))
84 self.ui = ui
85 self.ui = ui
85
86
86 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
87 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
87 proxyauthinfo = None
88 proxyauthinfo = None
88 handler = keepalive.HTTPHandler()
89 handler = keepalive.HTTPHandler()
89
90
90 if proxyurl:
91 if proxyurl:
91 # proxy can be proper url or host[:port]
92 # proxy can be proper url or host[:port]
92 if not (proxyurl.startswith('http:') or
93 if not (proxyurl.startswith('http:') or
93 proxyurl.startswith('https:')):
94 proxyurl.startswith('https:')):
94 proxyurl = 'http://' + proxyurl + '/'
95 proxyurl = 'http://' + proxyurl + '/'
95 snpqf = urlparse.urlsplit(proxyurl)
96 snpqf = urlparse.urlsplit(proxyurl)
96 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
97 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
97 hpup = netlocsplit(proxynetloc)
98 hpup = netlocsplit(proxynetloc)
98
99
99 proxyhost, proxyport, proxyuser, proxypasswd = hpup
100 proxyhost, proxyport, proxyuser, proxypasswd = hpup
100 if not proxyuser:
101 if not proxyuser:
101 proxyuser = ui.config("http_proxy", "user")
102 proxyuser = ui.config("http_proxy", "user")
102 proxypasswd = ui.config("http_proxy", "passwd")
103 proxypasswd = ui.config("http_proxy", "passwd")
103
104
104 # see if we should use a proxy for this url
105 # see if we should use a proxy for this url
105 no_list = [ "localhost", "127.0.0.1" ]
106 no_list = [ "localhost", "127.0.0.1" ]
106 no_list.extend([p.strip().lower() for
107 no_list.extend([p.strip().lower() for
107 p in ui.config("http_proxy", "no", '').split(',')
108 p in ui.config("http_proxy", "no", '').split(',')
108 if p.strip()])
109 if p.strip()])
109 no_list.extend([p.strip().lower() for
110 no_list.extend([p.strip().lower() for
110 p in os.getenv("no_proxy", '').split(',')
111 p in os.getenv("no_proxy", '').split(',')
111 if p.strip()])
112 if p.strip()])
112 # "http_proxy.always" config is for running tests on localhost
113 # "http_proxy.always" config is for running tests on localhost
113 if (not ui.configbool("http_proxy", "always") and
114 if (not ui.configbool("http_proxy", "always") and
114 host.lower() in no_list):
115 host.lower() in no_list):
115 ui.debug(_('disabling proxy for %s\n') % host)
116 ui.debug(_('disabling proxy for %s\n') % host)
116 else:
117 else:
117 proxyurl = urlparse.urlunsplit((
118 proxyurl = urlparse.urlunsplit((
118 proxyscheme, netlocunsplit(proxyhost, proxyport,
119 proxyscheme, netlocunsplit(proxyhost, proxyport,
119 proxyuser, proxypasswd or ''),
120 proxyuser, proxypasswd or ''),
120 proxypath, proxyquery, proxyfrag))
121 proxypath, proxyquery, proxyfrag))
121 handler = urllib2.ProxyHandler({scheme: proxyurl})
122 handler = urllib2.ProxyHandler({scheme: proxyurl})
122 ui.debug(_('proxying through %s\n') % proxyurl)
123 ui.debug(_('proxying through %s\n') % proxyurl)
123
124
124 # urllib2 takes proxy values from the environment and those
125 # urllib2 takes proxy values from the environment and those
125 # will take precedence if found, so drop them
126 # will take precedence if found, so drop them
126 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
127 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
127 try:
128 try:
128 if os.environ.has_key(env):
129 if os.environ.has_key(env):
129 del os.environ[env]
130 del os.environ[env]
130 except OSError:
131 except OSError:
131 pass
132 pass
132
133
133 passmgr = passwordmgr(ui)
134 passmgr = passwordmgr(ui)
134 if user:
135 if user:
135 ui.debug(_('will use user %s for http auth\n') % user)
136 ui.debug(_('will use user %s for http auth\n') % user)
136 passmgr.add_password(None, host, user, passwd or '')
137 passmgr.add_password(None, host, user, passwd or '')
137
138
138 opener = urllib2.build_opener(
139 opener = urllib2.build_opener(
139 handler,
140 handler,
140 urllib2.HTTPBasicAuthHandler(passmgr),
141 urllib2.HTTPBasicAuthHandler(passmgr),
141 urllib2.HTTPDigestAuthHandler(passmgr))
142 urllib2.HTTPDigestAuthHandler(passmgr))
142
143
143 # 1.0 here is the _protocol_ version
144 # 1.0 here is the _protocol_ version
144 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
145 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
145 urllib2.install_opener(opener)
146 urllib2.install_opener(opener)
146
147
147 def dev(self):
148 def dev(self):
148 return -1
149 return -1
149
150
150 def lock(self):
151 def lock(self):
151 raise util.Abort(_('operation not supported over http'))
152 raise util.Abort(_('operation not supported over http'))
152
153
153 def do_cmd(self, cmd, **args):
154 def do_cmd(self, cmd, **args):
154 self.ui.debug(_("sending %s command\n") % cmd)
155 self.ui.debug(_("sending %s command\n") % cmd)
155 q = {"cmd": cmd}
156 q = {"cmd": cmd}
156 q.update(args)
157 q.update(args)
157 qs = urllib.urlencode(q)
158 qs = urllib.urlencode(q)
158 cu = "%s?%s" % (self.url, qs)
159 cu = "%s?%s" % (self.url, qs)
159 try:
160 try:
160 resp = urllib2.urlopen(cu)
161 resp = urllib2.urlopen(cu)
161 except httplib.HTTPException, inst:
162 except httplib.HTTPException, inst:
162 self.ui.debug(_('http error while sending %s command\n') % cmd)
163 self.ui.debug(_('http error while sending %s command\n') % cmd)
163 self.ui.print_exc()
164 self.ui.print_exc()
164 raise IOError(None, inst)
165 raise IOError(None, inst)
165 try:
166 try:
166 proto = resp.getheader('content-type')
167 proto = resp.getheader('content-type')
167 except AttributeError:
168 except AttributeError:
168 proto = resp.headers['content-type']
169 proto = resp.headers['content-type']
169
170
170 # accept old "text/plain" and "application/hg-changegroup" for now
171 # accept old "text/plain" and "application/hg-changegroup" for now
171 if not proto.startswith('application/mercurial') and \
172 if not proto.startswith('application/mercurial') and \
172 not proto.startswith('text/plain') and \
173 not proto.startswith('text/plain') and \
173 not proto.startswith('application/hg-changegroup'):
174 not proto.startswith('application/hg-changegroup'):
174 raise hg.RepoError(_("'%s' does not appear to be an hg repository") %
175 raise hg.RepoError(_("'%s' does not appear to be an hg repository") %
175 self.url)
176 self.url)
176
177
177 if proto.startswith('application/mercurial'):
178 if proto.startswith('application/mercurial'):
178 version = proto[22:]
179 version = proto[22:]
179 if float(version) > 0.1:
180 if float(version) > 0.1:
180 raise hg.RepoError(_("'%s' uses newer protocol %s") %
181 raise hg.RepoError(_("'%s' uses newer protocol %s") %
181 (self.url, version))
182 (self.url, version))
182
183
183 return resp
184 return resp
184
185
185 def do_read(self, cmd, **args):
186 def do_read(self, cmd, **args):
186 fp = self.do_cmd(cmd, **args)
187 fp = self.do_cmd(cmd, **args)
187 try:
188 try:
188 return fp.read()
189 return fp.read()
189 finally:
190 finally:
190 # if using keepalive, allow connection to be reused
191 # if using keepalive, allow connection to be reused
191 fp.close()
192 fp.close()
192
193
193 def heads(self):
194 def heads(self):
194 d = self.do_read("heads")
195 d = self.do_read("heads")
195 try:
196 try:
196 return map(bin, d[:-1].split(" "))
197 return map(bin, d[:-1].split(" "))
197 except:
198 except:
198 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
199 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
199 raise
200 raise
200
201
201 def branches(self, nodes):
202 def branches(self, nodes):
202 n = " ".join(map(hex, nodes))
203 n = " ".join(map(hex, nodes))
203 d = self.do_read("branches", nodes=n)
204 d = self.do_read("branches", nodes=n)
204 try:
205 try:
205 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
206 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
206 return br
207 return br
207 except:
208 except:
208 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
209 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
209 raise
210 raise
210
211
211 def between(self, pairs):
212 def between(self, pairs):
212 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
213 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
213 d = self.do_read("between", pairs=n)
214 d = self.do_read("between", pairs=n)
214 try:
215 try:
215 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
216 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
216 return p
217 return p
217 except:
218 except:
218 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
219 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
219 raise
220 raise
220
221
221 def changegroup(self, nodes, kind):
222 def changegroup(self, nodes, kind):
222 n = " ".join(map(hex, nodes))
223 n = " ".join(map(hex, nodes))
223 f = self.do_cmd("changegroup", roots=n)
224 f = self.do_cmd("changegroup", roots=n)
224 bytes = 0
225 bytes = 0
225
226
226 def zgenerator(f):
227 def zgenerator(f):
227 zd = zlib.decompressobj()
228 zd = zlib.decompressobj()
228 try:
229 try:
229 for chnk in f:
230 for chnk in f:
230 yield zd.decompress(chnk)
231 yield zd.decompress(chnk)
231 except httplib.HTTPException, inst:
232 except httplib.HTTPException, inst:
232 raise IOError(None, _('connection ended unexpectedly'))
233 raise IOError(None, _('connection ended unexpectedly'))
233 yield zd.flush()
234 yield zd.flush()
234
235
235 return util.chunkbuffer(zgenerator(util.filechunkiter(f)))
236 return util.chunkbuffer(zgenerator(util.filechunkiter(f)))
236
237
238 def unbundle(self, cg, heads, source):
239 raise util.Abort(_('operation not supported over http'))
240
237 class httpsrepository(httprepository):
241 class httpsrepository(httprepository):
238 pass
242 pass
@@ -1,2109 +1,2145 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "revlog")
15 demandload(globals(), "revlog")
16
16
17 class localrepository(object):
17 class localrepository(object):
18 capabilities = ()
19
18 def __del__(self):
20 def __del__(self):
19 self.transhandle = None
21 self.transhandle = None
20 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
21 if not path:
23 if not path:
22 p = os.getcwd()
24 p = os.getcwd()
23 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
24 oldp = p
26 oldp = p
25 p = os.path.dirname(p)
27 p = os.path.dirname(p)
26 if p == oldp:
28 if p == oldp:
27 raise repo.RepoError(_("no repo found"))
29 raise repo.RepoError(_("no repo found"))
28 path = p
30 path = p
29 self.path = os.path.join(path, ".hg")
31 self.path = os.path.join(path, ".hg")
30
32
31 if not create and not os.path.isdir(self.path):
33 if not create and not os.path.isdir(self.path):
32 raise repo.RepoError(_("repository %s not found") % path)
34 raise repo.RepoError(_("repository %s not found") % path)
33
35
34 self.root = os.path.abspath(path)
36 self.root = os.path.abspath(path)
35 self.origroot = path
37 self.origroot = path
36 self.ui = ui.ui(parentui=parentui)
38 self.ui = ui.ui(parentui=parentui)
37 self.opener = util.opener(self.path)
39 self.opener = util.opener(self.path)
38 self.wopener = util.opener(self.root)
40 self.wopener = util.opener(self.root)
39
41
40 try:
42 try:
41 self.ui.readconfig(self.join("hgrc"), self.root)
43 self.ui.readconfig(self.join("hgrc"), self.root)
42 except IOError:
44 except IOError:
43 pass
45 pass
44
46
45 v = self.ui.revlogopts
47 v = self.ui.revlogopts
46 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
48 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
47 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
49 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
48 fl = v.get('flags', None)
50 fl = v.get('flags', None)
49 flags = 0
51 flags = 0
50 if fl != None:
52 if fl != None:
51 for x in fl.split():
53 for x in fl.split():
52 flags |= revlog.flagstr(x)
54 flags |= revlog.flagstr(x)
53 elif self.revlogv1:
55 elif self.revlogv1:
54 flags = revlog.REVLOG_DEFAULT_FLAGS
56 flags = revlog.REVLOG_DEFAULT_FLAGS
55
57
56 v = self.revlogversion | flags
58 v = self.revlogversion | flags
57 self.manifest = manifest.manifest(self.opener, v)
59 self.manifest = manifest.manifest(self.opener, v)
58 self.changelog = changelog.changelog(self.opener, v)
60 self.changelog = changelog.changelog(self.opener, v)
59
61
60 # the changelog might not have the inline index flag
62 # the changelog might not have the inline index flag
61 # on. If the format of the changelog is the same as found in
63 # on. If the format of the changelog is the same as found in
62 # .hgrc, apply any flags found in the .hgrc as well.
64 # .hgrc, apply any flags found in the .hgrc as well.
63 # Otherwise, just version from the changelog
65 # Otherwise, just version from the changelog
64 v = self.changelog.version
66 v = self.changelog.version
65 if v == self.revlogversion:
67 if v == self.revlogversion:
66 v |= flags
68 v |= flags
67 self.revlogversion = v
69 self.revlogversion = v
68
70
69 self.tagscache = None
71 self.tagscache = None
70 self.nodetagscache = None
72 self.nodetagscache = None
71 self.encodepats = None
73 self.encodepats = None
72 self.decodepats = None
74 self.decodepats = None
73 self.transhandle = None
75 self.transhandle = None
74
76
75 if create:
77 if create:
76 os.mkdir(self.path)
78 os.mkdir(self.path)
77 os.mkdir(self.join("data"))
79 os.mkdir(self.join("data"))
78
80
79 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
81 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80
82
81 def hook(self, name, throw=False, **args):
83 def hook(self, name, throw=False, **args):
82 def callhook(hname, funcname):
84 def callhook(hname, funcname):
83 '''call python hook. hook is callable object, looked up as
85 '''call python hook. hook is callable object, looked up as
84 name in python module. if callable returns "true", hook
86 name in python module. if callable returns "true", hook
85 fails, else passes. if hook raises exception, treated as
87 fails, else passes. if hook raises exception, treated as
86 hook failure. exception propagates if throw is "true".
88 hook failure. exception propagates if throw is "true".
87
89
88 reason for "true" meaning "hook failed" is so that
90 reason for "true" meaning "hook failed" is so that
89 unmodified commands (e.g. mercurial.commands.update) can
91 unmodified commands (e.g. mercurial.commands.update) can
90 be run as hooks without wrappers to convert return values.'''
92 be run as hooks without wrappers to convert return values.'''
91
93
92 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
94 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
93 d = funcname.rfind('.')
95 d = funcname.rfind('.')
94 if d == -1:
96 if d == -1:
95 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
97 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
96 % (hname, funcname))
98 % (hname, funcname))
97 modname = funcname[:d]
99 modname = funcname[:d]
98 try:
100 try:
99 obj = __import__(modname)
101 obj = __import__(modname)
100 except ImportError:
102 except ImportError:
101 raise util.Abort(_('%s hook is invalid '
103 raise util.Abort(_('%s hook is invalid '
102 '(import of "%s" failed)') %
104 '(import of "%s" failed)') %
103 (hname, modname))
105 (hname, modname))
104 try:
106 try:
105 for p in funcname.split('.')[1:]:
107 for p in funcname.split('.')[1:]:
106 obj = getattr(obj, p)
108 obj = getattr(obj, p)
107 except AttributeError, err:
109 except AttributeError, err:
108 raise util.Abort(_('%s hook is invalid '
110 raise util.Abort(_('%s hook is invalid '
109 '("%s" is not defined)') %
111 '("%s" is not defined)') %
110 (hname, funcname))
112 (hname, funcname))
111 if not callable(obj):
113 if not callable(obj):
112 raise util.Abort(_('%s hook is invalid '
114 raise util.Abort(_('%s hook is invalid '
113 '("%s" is not callable)') %
115 '("%s" is not callable)') %
114 (hname, funcname))
116 (hname, funcname))
115 try:
117 try:
116 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
118 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
117 except (KeyboardInterrupt, util.SignalInterrupt):
119 except (KeyboardInterrupt, util.SignalInterrupt):
118 raise
120 raise
119 except Exception, exc:
121 except Exception, exc:
120 if isinstance(exc, util.Abort):
122 if isinstance(exc, util.Abort):
121 self.ui.warn(_('error: %s hook failed: %s\n') %
123 self.ui.warn(_('error: %s hook failed: %s\n') %
122 (hname, exc.args[0] % exc.args[1:]))
124 (hname, exc.args[0] % exc.args[1:]))
123 else:
125 else:
124 self.ui.warn(_('error: %s hook raised an exception: '
126 self.ui.warn(_('error: %s hook raised an exception: '
125 '%s\n') % (hname, exc))
127 '%s\n') % (hname, exc))
126 if throw:
128 if throw:
127 raise
129 raise
128 self.ui.print_exc()
130 self.ui.print_exc()
129 return True
131 return True
130 if r:
132 if r:
131 if throw:
133 if throw:
132 raise util.Abort(_('%s hook failed') % hname)
134 raise util.Abort(_('%s hook failed') % hname)
133 self.ui.warn(_('warning: %s hook failed\n') % hname)
135 self.ui.warn(_('warning: %s hook failed\n') % hname)
134 return r
136 return r
135
137
136 def runhook(name, cmd):
138 def runhook(name, cmd):
137 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
139 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
138 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
140 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
139 r = util.system(cmd, environ=env, cwd=self.root)
141 r = util.system(cmd, environ=env, cwd=self.root)
140 if r:
142 if r:
141 desc, r = util.explain_exit(r)
143 desc, r = util.explain_exit(r)
142 if throw:
144 if throw:
143 raise util.Abort(_('%s hook %s') % (name, desc))
145 raise util.Abort(_('%s hook %s') % (name, desc))
144 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
146 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
145 return r
147 return r
146
148
147 r = False
149 r = False
148 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
150 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
149 if hname.split(".", 1)[0] == name and cmd]
151 if hname.split(".", 1)[0] == name and cmd]
150 hooks.sort()
152 hooks.sort()
151 for hname, cmd in hooks:
153 for hname, cmd in hooks:
152 if cmd.startswith('python:'):
154 if cmd.startswith('python:'):
153 r = callhook(hname, cmd[7:].strip()) or r
155 r = callhook(hname, cmd[7:].strip()) or r
154 else:
156 else:
155 r = runhook(hname, cmd) or r
157 r = runhook(hname, cmd) or r
156 return r
158 return r
157
159
158 def tags(self):
160 def tags(self):
159 '''return a mapping of tag to node'''
161 '''return a mapping of tag to node'''
160 if not self.tagscache:
162 if not self.tagscache:
161 self.tagscache = {}
163 self.tagscache = {}
162
164
163 def parsetag(line, context):
165 def parsetag(line, context):
164 if not line:
166 if not line:
165 return
167 return
166 s = l.split(" ", 1)
168 s = l.split(" ", 1)
167 if len(s) != 2:
169 if len(s) != 2:
168 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 self.ui.warn(_("%s: cannot parse entry\n") % context)
169 return
171 return
170 node, key = s
172 node, key = s
171 key = key.strip()
173 key = key.strip()
172 try:
174 try:
173 bin_n = bin(node)
175 bin_n = bin(node)
174 except TypeError:
176 except TypeError:
175 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 self.ui.warn(_("%s: node '%s' is not well formed\n") %
176 (context, node))
178 (context, node))
177 return
179 return
178 if bin_n not in self.changelog.nodemap:
180 if bin_n not in self.changelog.nodemap:
179 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
180 (context, key))
182 (context, key))
181 return
183 return
182 self.tagscache[key] = bin_n
184 self.tagscache[key] = bin_n
183
185
184 # read the tags file from each head, ending with the tip,
186 # read the tags file from each head, ending with the tip,
185 # and add each tag found to the map, with "newer" ones
187 # and add each tag found to the map, with "newer" ones
186 # taking precedence
188 # taking precedence
187 heads = self.heads()
189 heads = self.heads()
188 heads.reverse()
190 heads.reverse()
189 fl = self.file(".hgtags")
191 fl = self.file(".hgtags")
190 for node in heads:
192 for node in heads:
191 change = self.changelog.read(node)
193 change = self.changelog.read(node)
192 rev = self.changelog.rev(node)
194 rev = self.changelog.rev(node)
193 fn, ff = self.manifest.find(change[0], '.hgtags')
195 fn, ff = self.manifest.find(change[0], '.hgtags')
194 if fn is None: continue
196 if fn is None: continue
195 count = 0
197 count = 0
196 for l in fl.read(fn).splitlines():
198 for l in fl.read(fn).splitlines():
197 count += 1
199 count += 1
198 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
200 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
199 (rev, short(node), count))
201 (rev, short(node), count))
200 try:
202 try:
201 f = self.opener("localtags")
203 f = self.opener("localtags")
202 count = 0
204 count = 0
203 for l in f:
205 for l in f:
204 count += 1
206 count += 1
205 parsetag(l, _("localtags, line %d") % count)
207 parsetag(l, _("localtags, line %d") % count)
206 except IOError:
208 except IOError:
207 pass
209 pass
208
210
209 self.tagscache['tip'] = self.changelog.tip()
211 self.tagscache['tip'] = self.changelog.tip()
210
212
211 return self.tagscache
213 return self.tagscache
212
214
213 def tagslist(self):
215 def tagslist(self):
214 '''return a list of tags ordered by revision'''
216 '''return a list of tags ordered by revision'''
215 l = []
217 l = []
216 for t, n in self.tags().items():
218 for t, n in self.tags().items():
217 try:
219 try:
218 r = self.changelog.rev(n)
220 r = self.changelog.rev(n)
219 except:
221 except:
220 r = -2 # sort to the beginning of the list if unknown
222 r = -2 # sort to the beginning of the list if unknown
221 l.append((r, t, n))
223 l.append((r, t, n))
222 l.sort()
224 l.sort()
223 return [(t, n) for r, t, n in l]
225 return [(t, n) for r, t, n in l]
224
226
225 def nodetags(self, node):
227 def nodetags(self, node):
226 '''return the tags associated with a node'''
228 '''return the tags associated with a node'''
227 if not self.nodetagscache:
229 if not self.nodetagscache:
228 self.nodetagscache = {}
230 self.nodetagscache = {}
229 for t, n in self.tags().items():
231 for t, n in self.tags().items():
230 self.nodetagscache.setdefault(n, []).append(t)
232 self.nodetagscache.setdefault(n, []).append(t)
231 return self.nodetagscache.get(node, [])
233 return self.nodetagscache.get(node, [])
232
234
233 def lookup(self, key):
235 def lookup(self, key):
234 try:
236 try:
235 return self.tags()[key]
237 return self.tags()[key]
236 except KeyError:
238 except KeyError:
237 try:
239 try:
238 return self.changelog.lookup(key)
240 return self.changelog.lookup(key)
239 except:
241 except:
240 raise repo.RepoError(_("unknown revision '%s'") % key)
242 raise repo.RepoError(_("unknown revision '%s'") % key)
241
243
242 def dev(self):
244 def dev(self):
243 return os.stat(self.path).st_dev
245 return os.stat(self.path).st_dev
244
246
245 def local(self):
247 def local(self):
246 return True
248 return True
247
249
248 def join(self, f):
250 def join(self, f):
249 return os.path.join(self.path, f)
251 return os.path.join(self.path, f)
250
252
251 def wjoin(self, f):
253 def wjoin(self, f):
252 return os.path.join(self.root, f)
254 return os.path.join(self.root, f)
253
255
254 def file(self, f):
256 def file(self, f):
255 if f[0] == '/':
257 if f[0] == '/':
256 f = f[1:]
258 f = f[1:]
257 return filelog.filelog(self.opener, f, self.revlogversion)
259 return filelog.filelog(self.opener, f, self.revlogversion)
258
260
259 def getcwd(self):
261 def getcwd(self):
260 return self.dirstate.getcwd()
262 return self.dirstate.getcwd()
261
263
262 def wfile(self, f, mode='r'):
264 def wfile(self, f, mode='r'):
263 return self.wopener(f, mode)
265 return self.wopener(f, mode)
264
266
265 def wread(self, filename):
267 def wread(self, filename):
266 if self.encodepats == None:
268 if self.encodepats == None:
267 l = []
269 l = []
268 for pat, cmd in self.ui.configitems("encode"):
270 for pat, cmd in self.ui.configitems("encode"):
269 mf = util.matcher(self.root, "", [pat], [], [])[1]
271 mf = util.matcher(self.root, "", [pat], [], [])[1]
270 l.append((mf, cmd))
272 l.append((mf, cmd))
271 self.encodepats = l
273 self.encodepats = l
272
274
273 data = self.wopener(filename, 'r').read()
275 data = self.wopener(filename, 'r').read()
274
276
275 for mf, cmd in self.encodepats:
277 for mf, cmd in self.encodepats:
276 if mf(filename):
278 if mf(filename):
277 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
279 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
278 data = util.filter(data, cmd)
280 data = util.filter(data, cmd)
279 break
281 break
280
282
281 return data
283 return data
282
284
def wwrite(self, filename, data, fd=None):
    """Write data to filename in the working directory, applying any
    [decode] filters whose pattern matches it.

    If fd is given, write to that already-open file object instead of
    opening filename.  The (matcher, command) filter list is built
    lazily and cached on self.decodepats; only the first matching
    filter is applied.
    """
    # idiom fix: identity test against None, not equality
    if self.decodepats is None:
        l = []
        for pat, cmd in self.ui.configitems("decode"):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            l.append((mf, cmd))
        self.decodepats = l

    for mf, cmd in self.decodepats:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = util.filter(data, cmd)
            break

    if fd:
        return fd.write(data)
    return self.wopener(filename, 'w').write(data)
300
302
def transaction(self):
    """Return a new transaction, nesting inside the running one if any.

    Before opening a fresh transaction the current dirstate is copied
    to journal.dirstate so an interrupted run can be rolled back.
    """
    tr = self.transhandle
    # idiom fix: identity test against None, not equality
    if tr is not None and tr.running():
        return tr.nest()

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        # no dirstate file yet — journal an empty one
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)

    tr = transaction.transaction(self.ui.warn, self.opener,
                                 self.join("journal"),
                                 aftertrans(self.path))
    self.transhandle = tr
    return tr
318
320
def recover(self):
    """Roll back an interrupted transaction if a journal exists.

    Returns True when a journal was found and rolled back, False
    otherwise.
    """
    l = self.lock()
    if not os.path.exists(self.join("journal")):
        self.ui.warn(_("no interrupted transaction available\n"))
        return False
    self.ui.status(_("rolling back interrupted transaction\n"))
    transaction.rollback(self.opener, self.join("journal"))
    self.reload()
    return True
329
331
def rollback(self, wlock=None):
    """Undo the last committed transaction using the undo journal,
    restoring the saved dirstate as well."""
    if not wlock:
        wlock = self.wlock()
    l = self.lock()
    if not os.path.exists(self.join("undo")):
        self.ui.warn(_("no rollback information available\n"))
        return
    self.ui.status(_("rolling back last transaction\n"))
    transaction.rollback(self.opener, self.join("undo"))
    util.rename(self.join("undo.dirstate"), self.join("dirstate"))
    self.reload()
    self.wreload()
342
344
def wreload(self):
    """Re-read the dirstate (working directory state) from disk."""
    self.dirstate.read()
345
347
def reload(self):
    """Reload changelog and manifest, and drop the tag caches."""
    self.changelog.load()
    self.manifest.load()
    # cached tag data may describe the old contents; recompute lazily
    self.tagscache = None
    self.nodetagscache = None
351
353
def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
            desc=None):
    """Acquire the named lock file and return the lock object.

    If the lock is held elsewhere and wait is false, lock.LockHeld
    propagates to the caller; with wait true we warn and retry with a
    timeout taken from ui.timeout (600s default).  releasefn is passed
    to the lock, acquirefn is called once the lock is obtained, and
    desc is used in user-facing messages.
    """
    try:
        # first attempt is non-blocking (timeout 0)
        l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
    except lock.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %s\n") %
                     (desc, inst.args[0]))
        # default to 600 seconds timeout
        l = lock.lock(self.join(lockname),
                      int(self.ui.config("ui", "timeout") or 600),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
368
370
def lock(self, wait=1):
    """Acquire the repository (store) lock, reloading caches on
    acquisition."""
    return self.do_lock("lock", wait,
                        acquirefn=self.reload,
                        desc=_('repository %s') % self.origroot)
372
374
def wlock(self, wait=1):
    """Acquire the working directory lock; the dirstate is written on
    release and re-read on acquisition."""
    return self.do_lock("wlock", wait, self.dirstate.write,
                        acquirefn=self.wreload,
                        desc=_('working directory of %s') % self.origroot)
377
379
def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
    """determine whether a new filenode is needed

    Returns (node, None, None) when the file is unchanged from a
    parent and the existing filenode can be reused, otherwise
    (None, fp1, fp2) with the parent filenodes to record.
    """
    fp1 = manifest1.get(filename, nullid)
    fp2 = manifest2.get(filename, nullid)

    if fp2 != nullid:
        # if one parent filenode is an ancestor of the other, the
        # descendant already carries its history — drop the ancestor
        ancestor = filelog.ancestor(fp1, fp2)
        if ancestor == fp1:
            fp1, fp2 = fp2, nullid
        elif ancestor == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and text == filelog.read(fp1):
        return (fp1, None, None)

    return (None, fp1, fp2)
396
398
def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
    """Commit exactly the given files with explicit parents, without
    consulting working-directory status.

    p1/p2 default to the current dirstate parents.  Files that raise
    IOError on read are dropped from the new manifest instead.  The
    dirstate is only updated when p1 is the current first parent.
    """
    orig_parent = self.dirstate.parents()[0] or nullid
    p1 = p1 or self.dirstate.parents()[0] or nullid
    p2 = p2 or self.dirstate.parents()[1] or nullid
    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0])
    mf1 = self.manifest.readflags(c1[0])
    m2 = self.manifest.read(c2[0])
    changed = []

    # only touch the dirstate when committing on top of its parent
    if orig_parent == p1:
        update_dirstate = 1
    else:
        update_dirstate = 0

    if not wlock:
        wlock = self.wlock()
    l = self.lock()
    tr = self.transaction()
    mm = m1.copy()
    mfm = mf1.copy()
    linkrev = self.changelog.count()
    for f in files:
        try:
            t = self.wread(f)
            tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
            r = self.file(f)
            mfm[f] = tm

            (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
            if entry:
                # unchanged from a parent: reuse the existing filenode
                mm[f] = entry
                continue

            mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
            changed.append(f)
            if update_dirstate:
                self.dirstate.update([f], "n")
        except IOError:
            # unreadable in the working dir: treat as a removal
            try:
                del mm[f]
                del mfm[f]
                if update_dirstate:
                    self.dirstate.forget([f])
            except:
                # deleted from p2?
                pass

    mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
    user = user or self.ui.username()
    n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
    tr.close()
    if update_dirstate:
        self.dirstate.setparents(n, nullid)
452
454
def commit(self, files=None, text="", user=None, date=None,
           match=util.always, force=False, lock=None, wlock=None,
           force_editor=False):
    """Commit working directory changes and return the new changeset
    node, or None when nothing changed or the commit message came
    back empty.

    files: explicit list to commit; otherwise whatever self.changes()
    reports as modified/added/removed under match.
    force: commit even when no files changed.
    force_editor: open the editor even if text was supplied.
    lock/wlock: pre-acquired locks; taken here when not supplied.
    """
    commit = []
    remove = []
    changed = []

    if files:
        # trust the caller's list, but only accept files whose
        # dirstate state is committable
        for f in files:
            s = self.dirstate.state(f)
            if s in 'nmai':
                commit.append(f)
            elif s == 'r':
                remove.append(f)
            else:
                self.ui.warn(_("%s not tracked!\n") % f)
    else:
        modified, added, removed, deleted, unknown = self.changes(match=match)
        commit = modified + added
        remove = removed

    p1, p2 = self.dirstate.parents()
    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0])
    mf1 = self.manifest.readflags(c1[0])
    m2 = self.manifest.read(c2[0])

    # a merge in progress (p2 set) is committed even with no file changes
    if not commit and not remove and not force and p2 == nullid:
        self.ui.status(_("nothing changed\n"))
        return None

    xp1 = hex(p1)
    if p2 == nullid: xp2 = ''
    else: xp2 = hex(p2)

    self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

    if not wlock:
        wlock = self.wlock()
    if not lock:
        lock = self.lock()
    tr = self.transaction()

    # check in files
    new = {}
    linkrev = self.changelog.count()
    commit.sort()
    for f in commit:
        self.ui.note(f + "\n")
        try:
            mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
            t = self.wread(f)
        except IOError:
            self.ui.warn(_("trouble committing %s!\n") % f)
            raise

        r = self.file(f)

        meta = {}
        cp = self.dirstate.copied(f)
        if cp:
            # record copy source and its filenode in the file metadata
            meta["copy"] = cp
            meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
            self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
            fp1, fp2 = nullid, nullid
        else:
            entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
            if entry:
                # unchanged from a parent: reuse the existing filenode
                new[f] = entry
                continue

        new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
        # remember what we've added so that we can later calculate
        # the files to pull from a set of changesets
        changed.append(f)

    # update manifest
    m1 = m1.copy()
    m1.update(new)
    for f in remove:
        if f in m1:
            del m1[f]
    mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                           (new, remove))

    # add changeset
    new = new.keys()
    new.sort()

    user = user or self.ui.username()
    if not text or force_editor:
        edittext = []
        if text:
            edittext.append(text)
        edittext.append("")
        if p2 != nullid:
            edittext.append("HG: branch merge")
        edittext.extend(["HG: changed %s" % f for f in changed])
        edittext.extend(["HG: removed %s" % f for f in remove])
        if not changed and not remove:
            edittext.append("HG: no files changed")
        edittext.append("")
        # run editor in the repository root
        olddir = os.getcwd()
        os.chdir(self.root)
        text = self.ui.edit("\n".join(edittext), user)
        os.chdir(olddir)

    # strip trailing whitespace and leading blank lines; an entirely
    # empty message aborts the commit
    lines = [line.rstrip() for line in text.rstrip().splitlines()]
    while lines and not lines[0]:
        del lines[0]
    if not lines:
        return None
    text = '\n'.join(lines)
    n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
    self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
              parent2=xp2)
    tr.close()

    self.dirstate.setparents(n)
    self.dirstate.update(new, "n")
    self.dirstate.forget(remove)

    self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
    return n
579
581
def walk(self, node=None, files=[], match=util.always, badmatch=None):
    """Yield (source, filename) pairs for the files of interest.

    With node, walk the manifest of that revision: 'm' for manifest
    entries accepted by match, 'b' for requested files absent from
    the revision that badmatch accepts (others get a warning).
    Without node, delegate to the dirstate walk of the working dir.

    NOTE(review): the mutable default files=[] is never mutated here,
    but is fragile style — confirm before relying on it elsewhere.
    """
    if node:
        fdict = dict.fromkeys(files)
        for fn in self.manifest.read(self.changelog.read(node)[0]):
            # drop files found in the manifest; what remains in fdict
            # afterwards are the requested-but-missing ones
            fdict.pop(fn, None)
            if match(fn):
                yield 'm', fn
        for fn in fdict:
            if badmatch and badmatch(fn):
                if match(fn):
                    yield 'b', fn
            else:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
    else:
        for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
            yield src, fn
597
599
def changes(self, node1=None, node2=None, files=[], match=util.always,
            wlock=None, show_ignored=None):
    """return changes between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns (modified, added, removed, deleted, unknown), each a
    sorted list of filenames, with an extra ignored list appended
    when show_ignored is set.
    """

    def fcmp(fn, mf):
        # compare working-dir contents of fn against its manifest entry
        t1 = self.wread(fn)
        t2 = self.file(fn).read(mf.get(fn, nullid))
        return cmp(t1, t2)

    def mfmatches(node):
        # manifest of node restricted to files accepted by match
        change = self.changelog.read(node)
        mf = dict(self.manifest.read(change[0]))
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if node1:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

    # are we comparing the working directory?
    if not node2:
        if not wlock:
            # a write lock lets us refresh cached dirstate entries
            # below; carry on without one if it is already taken
            try:
                wlock = self.wlock(wait=0)
            except lock.LockException:
                wlock = None
        lookup, modified, added, removed, deleted, unknown, ignored = (
            self.dirstate.changes(files, match, show_ignored))

        # are we comparing working dir against its parent?
        if not node1:
            if lookup:
                # do a full compare of any files that might have changed
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup:
                    if fcmp(f, mf2):
                        modified.append(f)
                    elif wlock is not None:
                        # contents match: mark the entry clean
                        self.dirstate.update([f], "n")
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self.dirstate.parents()[0])
            for f in lookup + modified + added:
                mf2[f] = ""
            for f in removed:
                if f in mf2:
                    del mf2[f]
    else:
        # we are comparing two revisions
        deleted, unknown, ignored = [], [], []
        mf2 = mfmatches(node2)

    if node1:
        # flush lists from dirstate before comparing manifests
        modified, added = [], []

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                    modified.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        removed = mf1.keys()

    # sort and return results:
    for l in modified, added, removed, deleted, unknown, ignored:
        l.sort()
    if show_ignored is None:
        return (modified, added, removed, deleted, unknown)
    else:
        return (modified, added, removed, deleted, unknown, ignored)
680
682
def add(self, list, wlock=None):
    """Schedule the given files for addition at the next commit."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        p = self.wjoin(f)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % f)
            continue
        if not os.path.isfile(p):
            self.ui.warn(_("%s not added: only files supported currently\n")
                         % f)
            continue
        if self.dirstate.state(f) in 'an':
            self.ui.warn(_("%s already tracked!\n") % f)
            continue
        self.dirstate.update([f], "a")
695
697
def forget(self, list, wlock=None):
    """Undo a pending add for each file in list; warn for files that
    are not in the added/merged-add states."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) in 'ai':
            self.dirstate.forget([f])
        else:
            self.ui.warn(_("%s not added!\n") % f)
704
706
def remove(self, list, unlink=False, wlock=None):
    """Schedule files for removal from tracking.

    With unlink=True the working copies are deleted first (missing
    files are tolerated).  Files still present on disk are refused,
    pending adds are simply forgotten, and tracked files are marked
    'r' (removed).
    """
    if unlink:
        for f in list:
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # already gone is fine; anything else is a real error
                if inst.errno != errno.ENOENT:
                    raise
    if not wlock:
        wlock = self.wlock()
    for f in list:
        p = self.wjoin(f)
        if os.path.exists(p):
            # still on disk (and not unlinked above): refuse
            self.ui.warn(_("%s still exists!\n") % f)
        elif self.dirstate.state(f) == 'a':
            # never committed: just drop the pending add
            self.dirstate.forget([f])
        elif f not in self.dirstate:
            self.ui.warn(_("%s not tracked!\n") % f)
        else:
            self.dirstate.update([f], "r")
725
727
def undelete(self, list, wlock=None):
    """Restore files marked removed ('r') from the first dirstate
    parent, recreating their contents and exec flag."""
    p = self.dirstate.parents()[0]
    mn = self.changelog.read(p)[0]
    mf = self.manifest.readflags(mn)
    m = self.manifest.read(mn)
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) not in "r":
            # consistency fix: this was the only user-facing message
            # in the file not marked for translation with _()
            self.ui.warn(_("%s not removed!\n") % f)
        else:
            t = self.file(f).read(m[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf[f])
            self.dirstate.update([f], "n")
741
743
def copy(self, source, dest, wlock=None):
    """Record dest as a copy of source in the dirstate.

    dest must already exist as a plain file in the working directory;
    an untracked dest is scheduled for add first.
    """
    p = self.wjoin(dest)
    if not os.path.exists(p):
        self.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not os.path.isfile(p):
        self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        return
    if not wlock:
        wlock = self.wlock()
    if self.dirstate.state(dest) == '?':
        self.dirstate.update([dest], "a")
    self.dirstate.copy(source, dest)
754
756
def heads(self, start=None):
    """Return the repository heads sorted by descending revision."""
    # decorate-sort-undecorate on the negated revnum puts the newest
    # (highest revision) head first
    decorated = [(-self.changelog.rev(h), h)
                 for h in self.changelog.heads(start)]
    decorated.sort()
    return [h for (negrev, h) in decorated]
761
763
762 # branchlookup returns a dict giving a list of branches for
764 # branchlookup returns a dict giving a list of branches for
763 # each head. A branch is defined as the tag of a node or
765 # each head. A branch is defined as the tag of a node or
764 # the branch of the node's parents. If a node has multiple
766 # the branch of the node's parents. If a node has multiple
765 # branch tags, tags are eliminated if they are visible from other
767 # branch tags, tags are eliminated if they are visible from other
766 # branch tags.
768 # branch tags.
767 #
769 #
768 # So, for this graph: a->b->c->d->e
770 # So, for this graph: a->b->c->d->e
769 # \ /
771 # \ /
770 # aa -----/
772 # aa -----/
771 # a has tag 2.6.12
773 # a has tag 2.6.12
772 # d has tag 2.6.13
774 # d has tag 2.6.13
773 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
775 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
774 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
776 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
775 # from the list.
777 # from the list.
776 #
778 #
777 # It is possible that more than one head will have the same branch tag.
779 # It is possible that more than one head will have the same branch tag.
778 # callers need to check the result for multiple heads under the same
780 # callers need to check the result for multiple heads under the same
779 # branch tag if that is a problem for them (ie checkout of a specific
781 # branch tag if that is a problem for them (ie checkout of a specific
780 # branch).
782 # branch).
781 #
783 #
782 # passing in a specific branch will limit the depth of the search
784 # passing in a specific branch will limit the depth of the search
783 # through the parents. It won't limit the branches returned in the
785 # through the parents. It won't limit the branches returned in the
784 # result though.
786 # result though.
def branchlookup(self, heads=None, branch=None):
    """Return a dict mapping each head to the branch tags visible
    from it (the block comment above describes the full rules).

    heads defaults to self.heads(); branch, when given, stops the
    walk below nodes carrying that tag.
    """
    if not heads:
        heads = self.heads()
    headt = [ h for h in heads ]
    chlog = self.changelog
    branches = {}
    merges = []
    seenmerge = {}

    # traverse the tree once for each head, recording in the branches
    # dict which tags are visible from this head. The branches
    # dict also records which tags are visible from each tag
    # while we traverse.
    while headt or merges:
        if merges:
            # resume a queued second-parent walk, keeping the tags
            # already found on that path
            n, found = merges.pop()
            visit = [n]
        else:
            h = headt.pop()
            visit = [h]
            found = [h]
            seen = {}
        while visit:
            n = visit.pop()
            if n in seen:
                continue
            pp = chlog.parents(n)
            tags = self.nodetags(n)
            if tags:
                # record this tagged node as visible from everything
                # found so far (the 'break' makes one pass per node)
                for x in tags:
                    if x == 'tip':
                        continue
                    for f in found:
                        branches.setdefault(f, {})[n] = 1
                    branches.setdefault(n, {})[n] = 1
                    break
                if n not in found:
                    found.append(n)
                if branch in tags:
                    continue
            seen[n] = 1
            if pp[1] != nullid and n not in seenmerge:
                merges.append((pp[1], [x for x in found]))
                seenmerge[n] = 1
            if pp[0] != nullid:
                visit.append(pp[0])
    # traverse the branches dict, eliminating branch tags from each
    # head that are visible from another branch tag for that head.
    out = {}
    viscache = {}
    for h in heads:
        def visible(node):
            # memoized set of branch-tag nodes reachable from node
            if node in viscache:
                return viscache[node]
            ret = {}
            visit = [node]
            while visit:
                x = visit.pop()
                if x in viscache:
                    ret.update(viscache[x])
                elif x not in ret:
                    ret[x] = 1
                    if x in branches:
                        visit[len(visit):] = branches[x].keys()
            viscache[node] = ret
            return ret
        if h not in branches:
            continue
        # O(n^2), but somewhat limited. This only searches the
        # tags visible from a specific head, not all the tags in the
        # whole repo.
        for b in branches[h]:
            vis = False
            for bb in branches[h].keys():
                if b != bb:
                    if b in visible(bb):
                        vis = True
                        break
            if not vis:
                l = out.setdefault(h, [])
                l[len(l):] = self.nodetags(b)
    return out
867
869
868 def branches(self, nodes):
870 def branches(self, nodes):
869 if not nodes:
871 if not nodes:
870 nodes = [self.changelog.tip()]
872 nodes = [self.changelog.tip()]
871 b = []
873 b = []
872 for n in nodes:
874 for n in nodes:
873 t = n
875 t = n
874 while 1:
876 while 1:
875 p = self.changelog.parents(n)
877 p = self.changelog.parents(n)
876 if p[1] != nullid or p[0] == nullid:
878 if p[1] != nullid or p[0] == nullid:
877 b.append((t, n, p[0], p[1]))
879 b.append((t, n, p[0], p[1]))
878 break
880 break
879 n = p[0]
881 n = p[0]
880 return b
882 return b
881
883
882 def between(self, pairs):
884 def between(self, pairs):
883 r = []
885 r = []
884
886
885 for top, bottom in pairs:
887 for top, bottom in pairs:
886 n, l, i = top, [], 0
888 n, l, i = top, [], 0
887 f = 1
889 f = 1
888
890
889 while n != bottom:
891 while n != bottom:
890 p = self.changelog.parents(n)[0]
892 p = self.changelog.parents(n)[0]
891 if i == f:
893 if i == f:
892 l.append(n)
894 l.append(n)
893 f = f * 2
895 f = f * 2
894 n = p
896 n = p
895 i += 1
897 i += 1
896
898
897 r.append(l)
899 r.append(l)
898
900
899 return r
901 return r
900
902
901 def findincoming(self, remote, base=None, heads=None, force=False):
903 def findincoming(self, remote, base=None, heads=None, force=False):
902 """Return list of roots of the subsets of missing nodes from remote
904 """Return list of roots of the subsets of missing nodes from remote
903
905
904 If base dict is specified, assume that these nodes and their parents
906 If base dict is specified, assume that these nodes and their parents
905 exist on the remote side and that no child of a node of base exists
907 exist on the remote side and that no child of a node of base exists
906 in both remote and self.
908 in both remote and self.
907 Furthermore base will be updated to include the nodes that exists
909 Furthermore base will be updated to include the nodes that exists
908 in self and remote but no children exists in self and remote.
910 in self and remote but no children exists in self and remote.
909 If a list of heads is specified, return only nodes which are heads
911 If a list of heads is specified, return only nodes which are heads
910 or ancestors of these heads.
912 or ancestors of these heads.
911
913
912 All the ancestors of base are in self and in remote.
914 All the ancestors of base are in self and in remote.
913 All the descendants of the list returned are missing in self.
915 All the descendants of the list returned are missing in self.
914 (and so we know that the rest of the nodes are missing in remote, see
916 (and so we know that the rest of the nodes are missing in remote, see
915 outgoing)
917 outgoing)
916 """
918 """
917 m = self.changelog.nodemap
919 m = self.changelog.nodemap
918 search = []
920 search = []
919 fetch = {}
921 fetch = {}
920 seen = {}
922 seen = {}
921 seenbranch = {}
923 seenbranch = {}
922 if base == None:
924 if base == None:
923 base = {}
925 base = {}
924
926
925 if not heads:
927 if not heads:
926 heads = remote.heads()
928 heads = remote.heads()
927
929
928 if self.changelog.tip() == nullid:
930 if self.changelog.tip() == nullid:
929 base[nullid] = 1
931 base[nullid] = 1
930 if heads != [nullid]:
932 if heads != [nullid]:
931 return [nullid]
933 return [nullid]
932 return []
934 return []
933
935
934 # assume we're closer to the tip than the root
936 # assume we're closer to the tip than the root
935 # and start by examining the heads
937 # and start by examining the heads
936 self.ui.status(_("searching for changes\n"))
938 self.ui.status(_("searching for changes\n"))
937
939
938 unknown = []
940 unknown = []
939 for h in heads:
941 for h in heads:
940 if h not in m:
942 if h not in m:
941 unknown.append(h)
943 unknown.append(h)
942 else:
944 else:
943 base[h] = 1
945 base[h] = 1
944
946
945 if not unknown:
947 if not unknown:
946 return []
948 return []
947
949
948 req = dict.fromkeys(unknown)
950 req = dict.fromkeys(unknown)
949 reqcnt = 0
951 reqcnt = 0
950
952
951 # search through remote branches
953 # search through remote branches
952 # a 'branch' here is a linear segment of history, with four parts:
954 # a 'branch' here is a linear segment of history, with four parts:
953 # head, root, first parent, second parent
955 # head, root, first parent, second parent
954 # (a branch always has two parents (or none) by definition)
956 # (a branch always has two parents (or none) by definition)
955 unknown = remote.branches(unknown)
957 unknown = remote.branches(unknown)
956 while unknown:
958 while unknown:
957 r = []
959 r = []
958 while unknown:
960 while unknown:
959 n = unknown.pop(0)
961 n = unknown.pop(0)
960 if n[0] in seen:
962 if n[0] in seen:
961 continue
963 continue
962
964
963 self.ui.debug(_("examining %s:%s\n")
965 self.ui.debug(_("examining %s:%s\n")
964 % (short(n[0]), short(n[1])))
966 % (short(n[0]), short(n[1])))
965 if n[0] == nullid: # found the end of the branch
967 if n[0] == nullid: # found the end of the branch
966 pass
968 pass
967 elif n in seenbranch:
969 elif n in seenbranch:
968 self.ui.debug(_("branch already found\n"))
970 self.ui.debug(_("branch already found\n"))
969 continue
971 continue
970 elif n[1] and n[1] in m: # do we know the base?
972 elif n[1] and n[1] in m: # do we know the base?
971 self.ui.debug(_("found incomplete branch %s:%s\n")
973 self.ui.debug(_("found incomplete branch %s:%s\n")
972 % (short(n[0]), short(n[1])))
974 % (short(n[0]), short(n[1])))
973 search.append(n) # schedule branch range for scanning
975 search.append(n) # schedule branch range for scanning
974 seenbranch[n] = 1
976 seenbranch[n] = 1
975 else:
977 else:
976 if n[1] not in seen and n[1] not in fetch:
978 if n[1] not in seen and n[1] not in fetch:
977 if n[2] in m and n[3] in m:
979 if n[2] in m and n[3] in m:
978 self.ui.debug(_("found new changeset %s\n") %
980 self.ui.debug(_("found new changeset %s\n") %
979 short(n[1]))
981 short(n[1]))
980 fetch[n[1]] = 1 # earliest unknown
982 fetch[n[1]] = 1 # earliest unknown
981 for p in n[2:4]:
983 for p in n[2:4]:
982 if p in m:
984 if p in m:
983 base[p] = 1 # latest known
985 base[p] = 1 # latest known
984
986
985 for p in n[2:4]:
987 for p in n[2:4]:
986 if p not in req and p not in m:
988 if p not in req and p not in m:
987 r.append(p)
989 r.append(p)
988 req[p] = 1
990 req[p] = 1
989 seen[n[0]] = 1
991 seen[n[0]] = 1
990
992
991 if r:
993 if r:
992 reqcnt += 1
994 reqcnt += 1
993 self.ui.debug(_("request %d: %s\n") %
995 self.ui.debug(_("request %d: %s\n") %
994 (reqcnt, " ".join(map(short, r))))
996 (reqcnt, " ".join(map(short, r))))
995 for p in range(0, len(r), 10):
997 for p in range(0, len(r), 10):
996 for b in remote.branches(r[p:p+10]):
998 for b in remote.branches(r[p:p+10]):
997 self.ui.debug(_("received %s:%s\n") %
999 self.ui.debug(_("received %s:%s\n") %
998 (short(b[0]), short(b[1])))
1000 (short(b[0]), short(b[1])))
999 unknown.append(b)
1001 unknown.append(b)
1000
1002
1001 # do binary search on the branches we found
1003 # do binary search on the branches we found
1002 while search:
1004 while search:
1003 n = search.pop(0)
1005 n = search.pop(0)
1004 reqcnt += 1
1006 reqcnt += 1
1005 l = remote.between([(n[0], n[1])])[0]
1007 l = remote.between([(n[0], n[1])])[0]
1006 l.append(n[1])
1008 l.append(n[1])
1007 p = n[0]
1009 p = n[0]
1008 f = 1
1010 f = 1
1009 for i in l:
1011 for i in l:
1010 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1012 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1011 if i in m:
1013 if i in m:
1012 if f <= 2:
1014 if f <= 2:
1013 self.ui.debug(_("found new branch changeset %s\n") %
1015 self.ui.debug(_("found new branch changeset %s\n") %
1014 short(p))
1016 short(p))
1015 fetch[p] = 1
1017 fetch[p] = 1
1016 base[i] = 1
1018 base[i] = 1
1017 else:
1019 else:
1018 self.ui.debug(_("narrowed branch search to %s:%s\n")
1020 self.ui.debug(_("narrowed branch search to %s:%s\n")
1019 % (short(p), short(i)))
1021 % (short(p), short(i)))
1020 search.append((p, i))
1022 search.append((p, i))
1021 break
1023 break
1022 p, f = i, f * 2
1024 p, f = i, f * 2
1023
1025
1024 # sanity check our fetch list
1026 # sanity check our fetch list
1025 for f in fetch.keys():
1027 for f in fetch.keys():
1026 if f in m:
1028 if f in m:
1027 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1029 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1028
1030
1029 if base.keys() == [nullid]:
1031 if base.keys() == [nullid]:
1030 if force:
1032 if force:
1031 self.ui.warn(_("warning: repository is unrelated\n"))
1033 self.ui.warn(_("warning: repository is unrelated\n"))
1032 else:
1034 else:
1033 raise util.Abort(_("repository is unrelated"))
1035 raise util.Abort(_("repository is unrelated"))
1034
1036
1035 self.ui.note(_("found new changesets starting at ") +
1037 self.ui.note(_("found new changesets starting at ") +
1036 " ".join([short(f) for f in fetch]) + "\n")
1038 " ".join([short(f) for f in fetch]) + "\n")
1037
1039
1038 self.ui.debug(_("%d total queries\n") % reqcnt)
1040 self.ui.debug(_("%d total queries\n") % reqcnt)
1039
1041
1040 return fetch.keys()
1042 return fetch.keys()
1041
1043
1042 def findoutgoing(self, remote, base=None, heads=None, force=False):
1044 def findoutgoing(self, remote, base=None, heads=None, force=False):
1043 """Return list of nodes that are roots of subsets not in remote
1045 """Return list of nodes that are roots of subsets not in remote
1044
1046
1045 If base dict is specified, assume that these nodes and their parents
1047 If base dict is specified, assume that these nodes and their parents
1046 exist on the remote side.
1048 exist on the remote side.
1047 If a list of heads is specified, return only nodes which are heads
1049 If a list of heads is specified, return only nodes which are heads
1048 or ancestors of these heads, and return a second element which
1050 or ancestors of these heads, and return a second element which
1049 contains all remote heads which get new children.
1051 contains all remote heads which get new children.
1050 """
1052 """
1051 if base == None:
1053 if base == None:
1052 base = {}
1054 base = {}
1053 self.findincoming(remote, base, heads, force=force)
1055 self.findincoming(remote, base, heads, force=force)
1054
1056
1055 self.ui.debug(_("common changesets up to ")
1057 self.ui.debug(_("common changesets up to ")
1056 + " ".join(map(short, base.keys())) + "\n")
1058 + " ".join(map(short, base.keys())) + "\n")
1057
1059
1058 remain = dict.fromkeys(self.changelog.nodemap)
1060 remain = dict.fromkeys(self.changelog.nodemap)
1059
1061
1060 # prune everything remote has from the tree
1062 # prune everything remote has from the tree
1061 del remain[nullid]
1063 del remain[nullid]
1062 remove = base.keys()
1064 remove = base.keys()
1063 while remove:
1065 while remove:
1064 n = remove.pop(0)
1066 n = remove.pop(0)
1065 if n in remain:
1067 if n in remain:
1066 del remain[n]
1068 del remain[n]
1067 for p in self.changelog.parents(n):
1069 for p in self.changelog.parents(n):
1068 remove.append(p)
1070 remove.append(p)
1069
1071
1070 # find every node whose parents have been pruned
1072 # find every node whose parents have been pruned
1071 subset = []
1073 subset = []
1072 # find every remote head that will get new children
1074 # find every remote head that will get new children
1073 updated_heads = {}
1075 updated_heads = {}
1074 for n in remain:
1076 for n in remain:
1075 p1, p2 = self.changelog.parents(n)
1077 p1, p2 = self.changelog.parents(n)
1076 if p1 not in remain and p2 not in remain:
1078 if p1 not in remain and p2 not in remain:
1077 subset.append(n)
1079 subset.append(n)
1078 if heads:
1080 if heads:
1079 if p1 in heads:
1081 if p1 in heads:
1080 updated_heads[p1] = True
1082 updated_heads[p1] = True
1081 if p2 in heads:
1083 if p2 in heads:
1082 updated_heads[p2] = True
1084 updated_heads[p2] = True
1083
1085
1084 # this is the set of all roots we have to push
1086 # this is the set of all roots we have to push
1085 if heads:
1087 if heads:
1086 return subset, updated_heads.keys()
1088 return subset, updated_heads.keys()
1087 else:
1089 else:
1088 return subset
1090 return subset
1089
1091
1090 def pull(self, remote, heads=None, force=False):
1092 def pull(self, remote, heads=None, force=False):
1091 l = self.lock()
1093 l = self.lock()
1092
1094
1093 fetch = self.findincoming(remote, force=force)
1095 fetch = self.findincoming(remote, force=force)
1094 if fetch == [nullid]:
1096 if fetch == [nullid]:
1095 self.ui.status(_("requesting all changes\n"))
1097 self.ui.status(_("requesting all changes\n"))
1096
1098
1097 if not fetch:
1099 if not fetch:
1098 self.ui.status(_("no changes found\n"))
1100 self.ui.status(_("no changes found\n"))
1099 return 0
1101 return 0
1100
1102
1101 if heads is None:
1103 if heads is None:
1102 cg = remote.changegroup(fetch, 'pull')
1104 cg = remote.changegroup(fetch, 'pull')
1103 else:
1105 else:
1104 cg = remote.changegroupsubset(fetch, heads, 'pull')
1106 cg = remote.changegroupsubset(fetch, heads, 'pull')
1105 return self.addchangegroup(cg, 'pull')
1107 return self.addchangegroup(cg, 'pull')
1106
1108
1107 def push(self, remote, force=False, revs=None):
1109 def push(self, remote, force=False, revs=None):
1108 lock = remote.lock()
1110 # there are two ways to push to remote repo:
1111 #
1112 # addchangegroup assumes local user can lock remote
1113 # repo (local filesystem, old ssh servers).
1114 #
1115 # unbundle assumes local user cannot lock remote repo (new ssh
1116 # servers, http servers).
1109
1117
1118 if 'unbundle' in remote.capabilities:
1119 self.push_unbundle(remote, force, revs)
1120 else:
1121 self.push_addchangegroup(remote, force, revs)
1122
1123 def prepush(self, remote, force, revs):
1110 base = {}
1124 base = {}
1111 remote_heads = remote.heads()
1125 remote_heads = remote.heads()
1112 inc = self.findincoming(remote, base, remote_heads, force=force)
1126 inc = self.findincoming(remote, base, remote_heads, force=force)
1113 if not force and inc:
1127 if not force and inc:
1114 self.ui.warn(_("abort: unsynced remote changes!\n"))
1128 self.ui.warn(_("abort: unsynced remote changes!\n"))
1115 self.ui.status(_("(did you forget to sync?"
1129 self.ui.status(_("(did you forget to sync?"
1116 " use push -f to force)\n"))
1130 " use push -f to force)\n"))
1117 return 1
1131 return None, 1
1118
1132
1119 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1133 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1120 if revs is not None:
1134 if revs is not None:
1121 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1135 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1122 else:
1136 else:
1123 bases, heads = update, self.changelog.heads()
1137 bases, heads = update, self.changelog.heads()
1124
1138
1125 if not bases:
1139 if not bases:
1126 self.ui.status(_("no changes found\n"))
1140 self.ui.status(_("no changes found\n"))
1127 return 1
1141 return None, 1
1128 elif not force:
1142 elif not force:
1129 # FIXME we don't properly detect creation of new heads
1143 # FIXME we don't properly detect creation of new heads
1130 # in the push -r case, assume the user knows what he's doing
1144 # in the push -r case, assume the user knows what he's doing
1131 if not revs and len(remote_heads) < len(heads) \
1145 if not revs and len(remote_heads) < len(heads) \
1132 and remote_heads != [nullid]:
1146 and remote_heads != [nullid]:
1133 self.ui.warn(_("abort: push creates new remote branches!\n"))
1147 self.ui.warn(_("abort: push creates new remote branches!\n"))
1134 self.ui.status(_("(did you forget to merge?"
1148 self.ui.status(_("(did you forget to merge?"
1135 " use push -f to force)\n"))
1149 " use push -f to force)\n"))
1136 return 1
1150 return None, 1
1137
1151
1138 if revs is None:
1152 if revs is None:
1139 cg = self.changegroup(update, 'push')
1153 cg = self.changegroup(update, 'push')
1140 else:
1154 else:
1141 cg = self.changegroupsubset(update, revs, 'push')
1155 cg = self.changegroupsubset(update, revs, 'push')
1142 return remote.addchangegroup(cg, 'push')
1156 return cg, remote_heads
1157
1158 def push_addchangegroup(self, remote, force, revs):
1159 lock = remote.lock()
1160
1161 ret = self.prepush(remote, force, revs)
1162 if ret[0] is not None:
1163 cg, remote_heads = ret
1164 return remote.addchangegroup(cg, 'push')
1165 return ret[1]
1166
1167 def push_unbundle(self, remote, force, revs):
1168 # local repo finds heads on server, finds out what revs it
1169 # must push. once revs transferred, if server finds it has
1170 # different heads (someone else won commit/push race), server
1171 # aborts.
1172
1173 ret = self.prepush(remote, force, revs)
1174 if ret[0] is not None:
1175 cg, remote_heads = ret
1176 if force: remote_heads = ['force']
1177 return remote.unbundle(cg, remote_heads, 'push')
1178 return ret[1]
1143
1179
1144 def changegroupsubset(self, bases, heads, source):
1180 def changegroupsubset(self, bases, heads, source):
1145 """This function generates a changegroup consisting of all the nodes
1181 """This function generates a changegroup consisting of all the nodes
1146 that are descendents of any of the bases, and ancestors of any of
1182 that are descendents of any of the bases, and ancestors of any of
1147 the heads.
1183 the heads.
1148
1184
1149 It is fairly complex as determining which filenodes and which
1185 It is fairly complex as determining which filenodes and which
1150 manifest nodes need to be included for the changeset to be complete
1186 manifest nodes need to be included for the changeset to be complete
1151 is non-trivial.
1187 is non-trivial.
1152
1188
1153 Another wrinkle is doing the reverse, figuring out which changeset in
1189 Another wrinkle is doing the reverse, figuring out which changeset in
1154 the changegroup a particular filenode or manifestnode belongs to."""
1190 the changegroup a particular filenode or manifestnode belongs to."""
1155
1191
1156 self.hook('preoutgoing', throw=True, source=source)
1192 self.hook('preoutgoing', throw=True, source=source)
1157
1193
1158 # Set up some initial variables
1194 # Set up some initial variables
1159 # Make it easy to refer to self.changelog
1195 # Make it easy to refer to self.changelog
1160 cl = self.changelog
1196 cl = self.changelog
1161 # msng is short for missing - compute the list of changesets in this
1197 # msng is short for missing - compute the list of changesets in this
1162 # changegroup.
1198 # changegroup.
1163 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1199 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1164 # Some bases may turn out to be superfluous, and some heads may be
1200 # Some bases may turn out to be superfluous, and some heads may be
1165 # too. nodesbetween will return the minimal set of bases and heads
1201 # too. nodesbetween will return the minimal set of bases and heads
1166 # necessary to re-create the changegroup.
1202 # necessary to re-create the changegroup.
1167
1203
1168 # Known heads are the list of heads that it is assumed the recipient
1204 # Known heads are the list of heads that it is assumed the recipient
1169 # of this changegroup will know about.
1205 # of this changegroup will know about.
1170 knownheads = {}
1206 knownheads = {}
1171 # We assume that all parents of bases are known heads.
1207 # We assume that all parents of bases are known heads.
1172 for n in bases:
1208 for n in bases:
1173 for p in cl.parents(n):
1209 for p in cl.parents(n):
1174 if p != nullid:
1210 if p != nullid:
1175 knownheads[p] = 1
1211 knownheads[p] = 1
1176 knownheads = knownheads.keys()
1212 knownheads = knownheads.keys()
1177 if knownheads:
1213 if knownheads:
1178 # Now that we know what heads are known, we can compute which
1214 # Now that we know what heads are known, we can compute which
1179 # changesets are known. The recipient must know about all
1215 # changesets are known. The recipient must know about all
1180 # changesets required to reach the known heads from the null
1216 # changesets required to reach the known heads from the null
1181 # changeset.
1217 # changeset.
1182 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1218 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1183 junk = None
1219 junk = None
1184 # Transform the list into an ersatz set.
1220 # Transform the list into an ersatz set.
1185 has_cl_set = dict.fromkeys(has_cl_set)
1221 has_cl_set = dict.fromkeys(has_cl_set)
1186 else:
1222 else:
1187 # If there were no known heads, the recipient cannot be assumed to
1223 # If there were no known heads, the recipient cannot be assumed to
1188 # know about any changesets.
1224 # know about any changesets.
1189 has_cl_set = {}
1225 has_cl_set = {}
1190
1226
1191 # Make it easy to refer to self.manifest
1227 # Make it easy to refer to self.manifest
1192 mnfst = self.manifest
1228 mnfst = self.manifest
1193 # We don't know which manifests are missing yet
1229 # We don't know which manifests are missing yet
1194 msng_mnfst_set = {}
1230 msng_mnfst_set = {}
1195 # Nor do we know which filenodes are missing.
1231 # Nor do we know which filenodes are missing.
1196 msng_filenode_set = {}
1232 msng_filenode_set = {}
1197
1233
1198 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1234 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1199 junk = None
1235 junk = None
1200
1236
1201 # A changeset always belongs to itself, so the changenode lookup
1237 # A changeset always belongs to itself, so the changenode lookup
1202 # function for a changenode is identity.
1238 # function for a changenode is identity.
1203 def identity(x):
1239 def identity(x):
1204 return x
1240 return x
1205
1241
1206 # A function generating function. Sets up an environment for the
1242 # A function generating function. Sets up an environment for the
1207 # inner function.
1243 # inner function.
1208 def cmp_by_rev_func(revlog):
1244 def cmp_by_rev_func(revlog):
1209 # Compare two nodes by their revision number in the environment's
1245 # Compare two nodes by their revision number in the environment's
1210 # revision history. Since the revision number both represents the
1246 # revision history. Since the revision number both represents the
1211 # most efficient order to read the nodes in, and represents a
1247 # most efficient order to read the nodes in, and represents a
1212 # topological sorting of the nodes, this function is often useful.
1248 # topological sorting of the nodes, this function is often useful.
1213 def cmp_by_rev(a, b):
1249 def cmp_by_rev(a, b):
1214 return cmp(revlog.rev(a), revlog.rev(b))
1250 return cmp(revlog.rev(a), revlog.rev(b))
1215 return cmp_by_rev
1251 return cmp_by_rev
1216
1252
1217 # If we determine that a particular file or manifest node must be a
1253 # If we determine that a particular file or manifest node must be a
1218 # node that the recipient of the changegroup will already have, we can
1254 # node that the recipient of the changegroup will already have, we can
1219 # also assume the recipient will have all the parents. This function
1255 # also assume the recipient will have all the parents. This function
1220 # prunes them from the set of missing nodes.
1256 # prunes them from the set of missing nodes.
1221 def prune_parents(revlog, hasset, msngset):
1257 def prune_parents(revlog, hasset, msngset):
1222 haslst = hasset.keys()
1258 haslst = hasset.keys()
1223 haslst.sort(cmp_by_rev_func(revlog))
1259 haslst.sort(cmp_by_rev_func(revlog))
1224 for node in haslst:
1260 for node in haslst:
1225 parentlst = [p for p in revlog.parents(node) if p != nullid]
1261 parentlst = [p for p in revlog.parents(node) if p != nullid]
1226 while parentlst:
1262 while parentlst:
1227 n = parentlst.pop()
1263 n = parentlst.pop()
1228 if n not in hasset:
1264 if n not in hasset:
1229 hasset[n] = 1
1265 hasset[n] = 1
1230 p = [p for p in revlog.parents(n) if p != nullid]
1266 p = [p for p in revlog.parents(n) if p != nullid]
1231 parentlst.extend(p)
1267 parentlst.extend(p)
1232 for n in hasset:
1268 for n in hasset:
1233 msngset.pop(n, None)
1269 msngset.pop(n, None)
1234
1270
1235 # This is a function generating function used to set up an environment
1271 # This is a function generating function used to set up an environment
1236 # for the inner function to execute in.
1272 # for the inner function to execute in.
1237 def manifest_and_file_collector(changedfileset):
1273 def manifest_and_file_collector(changedfileset):
1238 # This is an information gathering function that gathers
1274 # This is an information gathering function that gathers
1239 # information from each changeset node that goes out as part of
1275 # information from each changeset node that goes out as part of
1240 # the changegroup. The information gathered is a list of which
1276 # the changegroup. The information gathered is a list of which
1241 # manifest nodes are potentially required (the recipient may
1277 # manifest nodes are potentially required (the recipient may
1242 # already have them) and total list of all files which were
1278 # already have them) and total list of all files which were
1243 # changed in any changeset in the changegroup.
1279 # changed in any changeset in the changegroup.
1244 #
1280 #
1245 # We also remember the first changenode we saw any manifest
1281 # We also remember the first changenode we saw any manifest
1246 # referenced by so we can later determine which changenode 'owns'
1282 # referenced by so we can later determine which changenode 'owns'
1247 # the manifest.
1283 # the manifest.
1248 def collect_manifests_and_files(clnode):
1284 def collect_manifests_and_files(clnode):
1249 c = cl.read(clnode)
1285 c = cl.read(clnode)
1250 for f in c[3]:
1286 for f in c[3]:
1251 # This is to make sure we only have one instance of each
1287 # This is to make sure we only have one instance of each
1252 # filename string for each filename.
1288 # filename string for each filename.
1253 changedfileset.setdefault(f, f)
1289 changedfileset.setdefault(f, f)
1254 msng_mnfst_set.setdefault(c[0], clnode)
1290 msng_mnfst_set.setdefault(c[0], clnode)
1255 return collect_manifests_and_files
1291 return collect_manifests_and_files
1256
1292
1257 # Figure out which manifest nodes (of the ones we think might be part
1293 # Figure out which manifest nodes (of the ones we think might be part
1258 # of the changegroup) the recipient must know about and remove them
1294 # of the changegroup) the recipient must know about and remove them
1259 # from the changegroup.
1295 # from the changegroup.
1260 def prune_manifests():
1296 def prune_manifests():
1261 has_mnfst_set = {}
1297 has_mnfst_set = {}
1262 for n in msng_mnfst_set:
1298 for n in msng_mnfst_set:
1263 # If a 'missing' manifest thinks it belongs to a changenode
1299 # If a 'missing' manifest thinks it belongs to a changenode
1264 # the recipient is assumed to have, obviously the recipient
1300 # the recipient is assumed to have, obviously the recipient
1265 # must have that manifest.
1301 # must have that manifest.
1266 linknode = cl.node(mnfst.linkrev(n))
1302 linknode = cl.node(mnfst.linkrev(n))
1267 if linknode in has_cl_set:
1303 if linknode in has_cl_set:
1268 has_mnfst_set[n] = 1
1304 has_mnfst_set[n] = 1
1269 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1305 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1270
1306
1271 # Use the information collected in collect_manifests_and_files to say
1307 # Use the information collected in collect_manifests_and_files to say
1272 # which changenode any manifestnode belongs to.
1308 # which changenode any manifestnode belongs to.
1273 def lookup_manifest_link(mnfstnode):
1309 def lookup_manifest_link(mnfstnode):
1274 return msng_mnfst_set[mnfstnode]
1310 return msng_mnfst_set[mnfstnode]
1275
1311
1276 # A function generating function that sets up the initial environment
1312 # A function generating function that sets up the initial environment
1277 # the inner function.
1313 # the inner function.
1278 def filenode_collector(changedfiles):
1314 def filenode_collector(changedfiles):
1279 next_rev = [0]
1315 next_rev = [0]
1280 # This gathers information from each manifestnode included in the
1316 # This gathers information from each manifestnode included in the
1281 # changegroup about which filenodes the manifest node references
1317 # changegroup about which filenodes the manifest node references
1282 # so we can include those in the changegroup too.
1318 # so we can include those in the changegroup too.
1283 #
1319 #
1284 # It also remembers which changenode each filenode belongs to. It
1320 # It also remembers which changenode each filenode belongs to. It
1285 # does this by assuming the a filenode belongs to the changenode
1321 # does this by assuming the a filenode belongs to the changenode
1286 # the first manifest that references it belongs to.
1322 # the first manifest that references it belongs to.
1287 def collect_msng_filenodes(mnfstnode):
1323 def collect_msng_filenodes(mnfstnode):
1288 r = mnfst.rev(mnfstnode)
1324 r = mnfst.rev(mnfstnode)
1289 if r == next_rev[0]:
1325 if r == next_rev[0]:
1290 # If the last rev we looked at was the one just previous,
1326 # If the last rev we looked at was the one just previous,
1291 # we only need to see a diff.
1327 # we only need to see a diff.
1292 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1328 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1293 # For each line in the delta
1329 # For each line in the delta
1294 for dline in delta.splitlines():
1330 for dline in delta.splitlines():
1295 # get the filename and filenode for that line
1331 # get the filename and filenode for that line
1296 f, fnode = dline.split('\0')
1332 f, fnode = dline.split('\0')
1297 fnode = bin(fnode[:40])
1333 fnode = bin(fnode[:40])
1298 f = changedfiles.get(f, None)
1334 f = changedfiles.get(f, None)
1299 # And if the file is in the list of files we care
1335 # And if the file is in the list of files we care
1300 # about.
1336 # about.
1301 if f is not None:
1337 if f is not None:
1302 # Get the changenode this manifest belongs to
1338 # Get the changenode this manifest belongs to
1303 clnode = msng_mnfst_set[mnfstnode]
1339 clnode = msng_mnfst_set[mnfstnode]
1304 # Create the set of filenodes for the file if
1340 # Create the set of filenodes for the file if
1305 # there isn't one already.
1341 # there isn't one already.
1306 ndset = msng_filenode_set.setdefault(f, {})
1342 ndset = msng_filenode_set.setdefault(f, {})
1307 # And set the filenode's changelog node to the
1343 # And set the filenode's changelog node to the
1308 # manifest's if it hasn't been set already.
1344 # manifest's if it hasn't been set already.
1309 ndset.setdefault(fnode, clnode)
1345 ndset.setdefault(fnode, clnode)
1310 else:
1346 else:
1311 # Otherwise we need a full manifest.
1347 # Otherwise we need a full manifest.
1312 m = mnfst.read(mnfstnode)
1348 m = mnfst.read(mnfstnode)
1313 # For every file in we care about.
1349 # For every file in we care about.
1314 for f in changedfiles:
1350 for f in changedfiles:
1315 fnode = m.get(f, None)
1351 fnode = m.get(f, None)
1316 # If it's in the manifest
1352 # If it's in the manifest
1317 if fnode is not None:
1353 if fnode is not None:
1318 # See comments above.
1354 # See comments above.
1319 clnode = msng_mnfst_set[mnfstnode]
1355 clnode = msng_mnfst_set[mnfstnode]
1320 ndset = msng_filenode_set.setdefault(f, {})
1356 ndset = msng_filenode_set.setdefault(f, {})
1321 ndset.setdefault(fnode, clnode)
1357 ndset.setdefault(fnode, clnode)
1322 # Remember the revision we hope to see next.
1358 # Remember the revision we hope to see next.
1323 next_rev[0] = r + 1
1359 next_rev[0] = r + 1
1324 return collect_msng_filenodes
1360 return collect_msng_filenodes
1325
1361
1326 # We have a list of filenodes we think we need for a file, lets remove
1362 # We have a list of filenodes we think we need for a file, lets remove
1327 # all those we now the recipient must have.
1363 # all those we now the recipient must have.
1328 def prune_filenodes(f, filerevlog):
1364 def prune_filenodes(f, filerevlog):
1329 msngset = msng_filenode_set[f]
1365 msngset = msng_filenode_set[f]
1330 hasset = {}
1366 hasset = {}
1331 # If a 'missing' filenode thinks it belongs to a changenode we
1367 # If a 'missing' filenode thinks it belongs to a changenode we
1332 # assume the recipient must have, then the recipient must have
1368 # assume the recipient must have, then the recipient must have
1333 # that filenode.
1369 # that filenode.
1334 for n in msngset:
1370 for n in msngset:
1335 clnode = cl.node(filerevlog.linkrev(n))
1371 clnode = cl.node(filerevlog.linkrev(n))
1336 if clnode in has_cl_set:
1372 if clnode in has_cl_set:
1337 hasset[n] = 1
1373 hasset[n] = 1
1338 prune_parents(filerevlog, hasset, msngset)
1374 prune_parents(filerevlog, hasset, msngset)
1339
1375
1340 # A function generator function that sets up the a context for the
1376 # A function generator function that sets up the a context for the
1341 # inner function.
1377 # inner function.
1342 def lookup_filenode_link_func(fname):
1378 def lookup_filenode_link_func(fname):
1343 msngset = msng_filenode_set[fname]
1379 msngset = msng_filenode_set[fname]
1344 # Lookup the changenode the filenode belongs to.
1380 # Lookup the changenode the filenode belongs to.
1345 def lookup_filenode_link(fnode):
1381 def lookup_filenode_link(fnode):
1346 return msngset[fnode]
1382 return msngset[fnode]
1347 return lookup_filenode_link
1383 return lookup_filenode_link
1348
1384
1349 # Now that we have all theses utility functions to help out and
1385 # Now that we have all theses utility functions to help out and
1350 # logically divide up the task, generate the group.
1386 # logically divide up the task, generate the group.
1351 def gengroup():
1387 def gengroup():
1352 # The set of changed files starts empty.
1388 # The set of changed files starts empty.
1353 changedfiles = {}
1389 changedfiles = {}
1354 # Create a changenode group generator that will call our functions
1390 # Create a changenode group generator that will call our functions
1355 # back to lookup the owning changenode and collect information.
1391 # back to lookup the owning changenode and collect information.
1356 group = cl.group(msng_cl_lst, identity,
1392 group = cl.group(msng_cl_lst, identity,
1357 manifest_and_file_collector(changedfiles))
1393 manifest_and_file_collector(changedfiles))
1358 for chnk in group:
1394 for chnk in group:
1359 yield chnk
1395 yield chnk
1360
1396
1361 # The list of manifests has been collected by the generator
1397 # The list of manifests has been collected by the generator
1362 # calling our functions back.
1398 # calling our functions back.
1363 prune_manifests()
1399 prune_manifests()
1364 msng_mnfst_lst = msng_mnfst_set.keys()
1400 msng_mnfst_lst = msng_mnfst_set.keys()
1365 # Sort the manifestnodes by revision number.
1401 # Sort the manifestnodes by revision number.
1366 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1402 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1367 # Create a generator for the manifestnodes that calls our lookup
1403 # Create a generator for the manifestnodes that calls our lookup
1368 # and data collection functions back.
1404 # and data collection functions back.
1369 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1405 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1370 filenode_collector(changedfiles))
1406 filenode_collector(changedfiles))
1371 for chnk in group:
1407 for chnk in group:
1372 yield chnk
1408 yield chnk
1373
1409
1374 # These are no longer needed, dereference and toss the memory for
1410 # These are no longer needed, dereference and toss the memory for
1375 # them.
1411 # them.
1376 msng_mnfst_lst = None
1412 msng_mnfst_lst = None
1377 msng_mnfst_set.clear()
1413 msng_mnfst_set.clear()
1378
1414
1379 changedfiles = changedfiles.keys()
1415 changedfiles = changedfiles.keys()
1380 changedfiles.sort()
1416 changedfiles.sort()
1381 # Go through all our files in order sorted by name.
1417 # Go through all our files in order sorted by name.
1382 for fname in changedfiles:
1418 for fname in changedfiles:
1383 filerevlog = self.file(fname)
1419 filerevlog = self.file(fname)
1384 # Toss out the filenodes that the recipient isn't really
1420 # Toss out the filenodes that the recipient isn't really
1385 # missing.
1421 # missing.
1386 if msng_filenode_set.has_key(fname):
1422 if msng_filenode_set.has_key(fname):
1387 prune_filenodes(fname, filerevlog)
1423 prune_filenodes(fname, filerevlog)
1388 msng_filenode_lst = msng_filenode_set[fname].keys()
1424 msng_filenode_lst = msng_filenode_set[fname].keys()
1389 else:
1425 else:
1390 msng_filenode_lst = []
1426 msng_filenode_lst = []
1391 # If any filenodes are left, generate the group for them,
1427 # If any filenodes are left, generate the group for them,
1392 # otherwise don't bother.
1428 # otherwise don't bother.
1393 if len(msng_filenode_lst) > 0:
1429 if len(msng_filenode_lst) > 0:
1394 yield changegroup.genchunk(fname)
1430 yield changegroup.genchunk(fname)
1395 # Sort the filenodes by their revision #
1431 # Sort the filenodes by their revision #
1396 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1432 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1397 # Create a group generator and only pass in a changenode
1433 # Create a group generator and only pass in a changenode
1398 # lookup function as we need to collect no information
1434 # lookup function as we need to collect no information
1399 # from filenodes.
1435 # from filenodes.
1400 group = filerevlog.group(msng_filenode_lst,
1436 group = filerevlog.group(msng_filenode_lst,
1401 lookup_filenode_link_func(fname))
1437 lookup_filenode_link_func(fname))
1402 for chnk in group:
1438 for chnk in group:
1403 yield chnk
1439 yield chnk
1404 if msng_filenode_set.has_key(fname):
1440 if msng_filenode_set.has_key(fname):
1405 # Don't need this anymore, toss it to free memory.
1441 # Don't need this anymore, toss it to free memory.
1406 del msng_filenode_set[fname]
1442 del msng_filenode_set[fname]
1407 # Signal that no more groups are left.
1443 # Signal that no more groups are left.
1408 yield changegroup.closechunk()
1444 yield changegroup.closechunk()
1409
1445
1410 if msng_cl_lst:
1446 if msng_cl_lst:
1411 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1447 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1412
1448
1413 return util.chunkbuffer(gengroup())
1449 return util.chunkbuffer(gengroup())
1414
1450
1415 def changegroup(self, basenodes, source):
1451 def changegroup(self, basenodes, source):
1416 """Generate a changegroup of all nodes that we have that a recipient
1452 """Generate a changegroup of all nodes that we have that a recipient
1417 doesn't.
1453 doesn't.
1418
1454
1419 This is much easier than the previous function as we can assume that
1455 This is much easier than the previous function as we can assume that
1420 the recipient has any changenode we aren't sending them."""
1456 the recipient has any changenode we aren't sending them."""
1421
1457
1422 self.hook('preoutgoing', throw=True, source=source)
1458 self.hook('preoutgoing', throw=True, source=source)
1423
1459
1424 cl = self.changelog
1460 cl = self.changelog
1425 nodes = cl.nodesbetween(basenodes, None)[0]
1461 nodes = cl.nodesbetween(basenodes, None)[0]
1426 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1462 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1427
1463
1428 def identity(x):
1464 def identity(x):
1429 return x
1465 return x
1430
1466
1431 def gennodelst(revlog):
1467 def gennodelst(revlog):
1432 for r in xrange(0, revlog.count()):
1468 for r in xrange(0, revlog.count()):
1433 n = revlog.node(r)
1469 n = revlog.node(r)
1434 if revlog.linkrev(n) in revset:
1470 if revlog.linkrev(n) in revset:
1435 yield n
1471 yield n
1436
1472
1437 def changed_file_collector(changedfileset):
1473 def changed_file_collector(changedfileset):
1438 def collect_changed_files(clnode):
1474 def collect_changed_files(clnode):
1439 c = cl.read(clnode)
1475 c = cl.read(clnode)
1440 for fname in c[3]:
1476 for fname in c[3]:
1441 changedfileset[fname] = 1
1477 changedfileset[fname] = 1
1442 return collect_changed_files
1478 return collect_changed_files
1443
1479
1444 def lookuprevlink_func(revlog):
1480 def lookuprevlink_func(revlog):
1445 def lookuprevlink(n):
1481 def lookuprevlink(n):
1446 return cl.node(revlog.linkrev(n))
1482 return cl.node(revlog.linkrev(n))
1447 return lookuprevlink
1483 return lookuprevlink
1448
1484
1449 def gengroup():
1485 def gengroup():
1450 # construct a list of all changed files
1486 # construct a list of all changed files
1451 changedfiles = {}
1487 changedfiles = {}
1452
1488
1453 for chnk in cl.group(nodes, identity,
1489 for chnk in cl.group(nodes, identity,
1454 changed_file_collector(changedfiles)):
1490 changed_file_collector(changedfiles)):
1455 yield chnk
1491 yield chnk
1456 changedfiles = changedfiles.keys()
1492 changedfiles = changedfiles.keys()
1457 changedfiles.sort()
1493 changedfiles.sort()
1458
1494
1459 mnfst = self.manifest
1495 mnfst = self.manifest
1460 nodeiter = gennodelst(mnfst)
1496 nodeiter = gennodelst(mnfst)
1461 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1497 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1462 yield chnk
1498 yield chnk
1463
1499
1464 for fname in changedfiles:
1500 for fname in changedfiles:
1465 filerevlog = self.file(fname)
1501 filerevlog = self.file(fname)
1466 nodeiter = gennodelst(filerevlog)
1502 nodeiter = gennodelst(filerevlog)
1467 nodeiter = list(nodeiter)
1503 nodeiter = list(nodeiter)
1468 if nodeiter:
1504 if nodeiter:
1469 yield changegroup.genchunk(fname)
1505 yield changegroup.genchunk(fname)
1470 lookup = lookuprevlink_func(filerevlog)
1506 lookup = lookuprevlink_func(filerevlog)
1471 for chnk in filerevlog.group(nodeiter, lookup):
1507 for chnk in filerevlog.group(nodeiter, lookup):
1472 yield chnk
1508 yield chnk
1473
1509
1474 yield changegroup.closechunk()
1510 yield changegroup.closechunk()
1475
1511
1476 if nodes:
1512 if nodes:
1477 self.hook('outgoing', node=hex(nodes[0]), source=source)
1513 self.hook('outgoing', node=hex(nodes[0]), source=source)
1478
1514
1479 return util.chunkbuffer(gengroup())
1515 return util.chunkbuffer(gengroup())
1480
1516
1481 def addchangegroup(self, source, srctype):
1517 def addchangegroup(self, source, srctype):
1482 """add changegroup to repo.
1518 """add changegroup to repo.
1483 returns number of heads modified or added + 1."""
1519 returns number of heads modified or added + 1."""
1484
1520
1485 def csmap(x):
1521 def csmap(x):
1486 self.ui.debug(_("add changeset %s\n") % short(x))
1522 self.ui.debug(_("add changeset %s\n") % short(x))
1487 return cl.count()
1523 return cl.count()
1488
1524
1489 def revmap(x):
1525 def revmap(x):
1490 return cl.rev(x)
1526 return cl.rev(x)
1491
1527
1492 if not source:
1528 if not source:
1493 return 0
1529 return 0
1494
1530
1495 self.hook('prechangegroup', throw=True, source=srctype)
1531 self.hook('prechangegroup', throw=True, source=srctype)
1496
1532
1497 changesets = files = revisions = 0
1533 changesets = files = revisions = 0
1498
1534
1499 tr = self.transaction()
1535 tr = self.transaction()
1500
1536
1501 # write changelog data to temp files so concurrent readers will not see
1537 # write changelog data to temp files so concurrent readers will not see
1502 # inconsistent view
1538 # inconsistent view
1503 cl = None
1539 cl = None
1504 try:
1540 try:
1505 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1541 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1506
1542
1507 oldheads = len(cl.heads())
1543 oldheads = len(cl.heads())
1508
1544
1509 # pull off the changeset group
1545 # pull off the changeset group
1510 self.ui.status(_("adding changesets\n"))
1546 self.ui.status(_("adding changesets\n"))
1511 cor = cl.count() - 1
1547 cor = cl.count() - 1
1512 chunkiter = changegroup.chunkiter(source)
1548 chunkiter = changegroup.chunkiter(source)
1513 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1549 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1514 raise util.Abort(_("received changelog group is empty"))
1550 raise util.Abort(_("received changelog group is empty"))
1515 cnr = cl.count() - 1
1551 cnr = cl.count() - 1
1516 changesets = cnr - cor
1552 changesets = cnr - cor
1517
1553
1518 # pull off the manifest group
1554 # pull off the manifest group
1519 self.ui.status(_("adding manifests\n"))
1555 self.ui.status(_("adding manifests\n"))
1520 chunkiter = changegroup.chunkiter(source)
1556 chunkiter = changegroup.chunkiter(source)
1521 # no need to check for empty manifest group here:
1557 # no need to check for empty manifest group here:
1522 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1558 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1523 # no new manifest will be created and the manifest group will
1559 # no new manifest will be created and the manifest group will
1524 # be empty during the pull
1560 # be empty during the pull
1525 self.manifest.addgroup(chunkiter, revmap, tr)
1561 self.manifest.addgroup(chunkiter, revmap, tr)
1526
1562
1527 # process the files
1563 # process the files
1528 self.ui.status(_("adding file changes\n"))
1564 self.ui.status(_("adding file changes\n"))
1529 while 1:
1565 while 1:
1530 f = changegroup.getchunk(source)
1566 f = changegroup.getchunk(source)
1531 if not f:
1567 if not f:
1532 break
1568 break
1533 self.ui.debug(_("adding %s revisions\n") % f)
1569 self.ui.debug(_("adding %s revisions\n") % f)
1534 fl = self.file(f)
1570 fl = self.file(f)
1535 o = fl.count()
1571 o = fl.count()
1536 chunkiter = changegroup.chunkiter(source)
1572 chunkiter = changegroup.chunkiter(source)
1537 if fl.addgroup(chunkiter, revmap, tr) is None:
1573 if fl.addgroup(chunkiter, revmap, tr) is None:
1538 raise util.Abort(_("received file revlog group is empty"))
1574 raise util.Abort(_("received file revlog group is empty"))
1539 revisions += fl.count() - o
1575 revisions += fl.count() - o
1540 files += 1
1576 files += 1
1541
1577
1542 cl.writedata()
1578 cl.writedata()
1543 finally:
1579 finally:
1544 if cl:
1580 if cl:
1545 cl.cleanup()
1581 cl.cleanup()
1546
1582
1547 # make changelog see real files again
1583 # make changelog see real files again
1548 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1584 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1549 self.changelog.checkinlinesize(tr)
1585 self.changelog.checkinlinesize(tr)
1550
1586
1551 newheads = len(self.changelog.heads())
1587 newheads = len(self.changelog.heads())
1552 heads = ""
1588 heads = ""
1553 if oldheads and newheads != oldheads:
1589 if oldheads and newheads != oldheads:
1554 heads = _(" (%+d heads)") % (newheads - oldheads)
1590 heads = _(" (%+d heads)") % (newheads - oldheads)
1555
1591
1556 self.ui.status(_("added %d changesets"
1592 self.ui.status(_("added %d changesets"
1557 " with %d changes to %d files%s\n")
1593 " with %d changes to %d files%s\n")
1558 % (changesets, revisions, files, heads))
1594 % (changesets, revisions, files, heads))
1559
1595
1560 if changesets > 0:
1596 if changesets > 0:
1561 self.hook('pretxnchangegroup', throw=True,
1597 self.hook('pretxnchangegroup', throw=True,
1562 node=hex(self.changelog.node(cor+1)), source=srctype)
1598 node=hex(self.changelog.node(cor+1)), source=srctype)
1563
1599
1564 tr.close()
1600 tr.close()
1565
1601
1566 if changesets > 0:
1602 if changesets > 0:
1567 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1603 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1568 source=srctype)
1604 source=srctype)
1569
1605
1570 for i in range(cor + 1, cnr + 1):
1606 for i in range(cor + 1, cnr + 1):
1571 self.hook("incoming", node=hex(self.changelog.node(i)),
1607 self.hook("incoming", node=hex(self.changelog.node(i)),
1572 source=srctype)
1608 source=srctype)
1573
1609
1574 return newheads - oldheads + 1
1610 return newheads - oldheads + 1
1575
1611
1576 def update(self, node, allow=False, force=False, choose=None,
1612 def update(self, node, allow=False, force=False, choose=None,
1577 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1613 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1578 pl = self.dirstate.parents()
1614 pl = self.dirstate.parents()
1579 if not force and pl[1] != nullid:
1615 if not force and pl[1] != nullid:
1580 raise util.Abort(_("outstanding uncommitted merges"))
1616 raise util.Abort(_("outstanding uncommitted merges"))
1581
1617
1582 err = False
1618 err = False
1583
1619
1584 p1, p2 = pl[0], node
1620 p1, p2 = pl[0], node
1585 pa = self.changelog.ancestor(p1, p2)
1621 pa = self.changelog.ancestor(p1, p2)
1586 m1n = self.changelog.read(p1)[0]
1622 m1n = self.changelog.read(p1)[0]
1587 m2n = self.changelog.read(p2)[0]
1623 m2n = self.changelog.read(p2)[0]
1588 man = self.manifest.ancestor(m1n, m2n)
1624 man = self.manifest.ancestor(m1n, m2n)
1589 m1 = self.manifest.read(m1n)
1625 m1 = self.manifest.read(m1n)
1590 mf1 = self.manifest.readflags(m1n)
1626 mf1 = self.manifest.readflags(m1n)
1591 m2 = self.manifest.read(m2n).copy()
1627 m2 = self.manifest.read(m2n).copy()
1592 mf2 = self.manifest.readflags(m2n)
1628 mf2 = self.manifest.readflags(m2n)
1593 ma = self.manifest.read(man)
1629 ma = self.manifest.read(man)
1594 mfa = self.manifest.readflags(man)
1630 mfa = self.manifest.readflags(man)
1595
1631
1596 modified, added, removed, deleted, unknown = self.changes()
1632 modified, added, removed, deleted, unknown = self.changes()
1597
1633
1598 # is this a jump, or a merge? i.e. is there a linear path
1634 # is this a jump, or a merge? i.e. is there a linear path
1599 # from p1 to p2?
1635 # from p1 to p2?
1600 linear_path = (pa == p1 or pa == p2)
1636 linear_path = (pa == p1 or pa == p2)
1601
1637
1602 if allow and linear_path:
1638 if allow and linear_path:
1603 raise util.Abort(_("there is nothing to merge, "
1639 raise util.Abort(_("there is nothing to merge, "
1604 "just use 'hg update'"))
1640 "just use 'hg update'"))
1605 if allow and not forcemerge:
1641 if allow and not forcemerge:
1606 if modified or added or removed:
1642 if modified or added or removed:
1607 raise util.Abort(_("outstanding uncommitted changes"))
1643 raise util.Abort(_("outstanding uncommitted changes"))
1608
1644
1609 if not forcemerge and not force:
1645 if not forcemerge and not force:
1610 for f in unknown:
1646 for f in unknown:
1611 if f in m2:
1647 if f in m2:
1612 t1 = self.wread(f)
1648 t1 = self.wread(f)
1613 t2 = self.file(f).read(m2[f])
1649 t2 = self.file(f).read(m2[f])
1614 if cmp(t1, t2) != 0:
1650 if cmp(t1, t2) != 0:
1615 raise util.Abort(_("'%s' already exists in the working"
1651 raise util.Abort(_("'%s' already exists in the working"
1616 " dir and differs from remote") % f)
1652 " dir and differs from remote") % f)
1617
1653
1618 # resolve the manifest to determine which files
1654 # resolve the manifest to determine which files
1619 # we care about merging
1655 # we care about merging
1620 self.ui.note(_("resolving manifests\n"))
1656 self.ui.note(_("resolving manifests\n"))
1621 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1657 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1622 (force, allow, moddirstate, linear_path))
1658 (force, allow, moddirstate, linear_path))
1623 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1659 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1624 (short(man), short(m1n), short(m2n)))
1660 (short(man), short(m1n), short(m2n)))
1625
1661
1626 merge = {}
1662 merge = {}
1627 get = {}
1663 get = {}
1628 remove = []
1664 remove = []
1629
1665
1630 # construct a working dir manifest
1666 # construct a working dir manifest
1631 mw = m1.copy()
1667 mw = m1.copy()
1632 mfw = mf1.copy()
1668 mfw = mf1.copy()
1633 umap = dict.fromkeys(unknown)
1669 umap = dict.fromkeys(unknown)
1634
1670
1635 for f in added + modified + unknown:
1671 for f in added + modified + unknown:
1636 mw[f] = ""
1672 mw[f] = ""
1637 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1673 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1638
1674
1639 if moddirstate and not wlock:
1675 if moddirstate and not wlock:
1640 wlock = self.wlock()
1676 wlock = self.wlock()
1641
1677
1642 for f in deleted + removed:
1678 for f in deleted + removed:
1643 if f in mw:
1679 if f in mw:
1644 del mw[f]
1680 del mw[f]
1645
1681
1646 # If we're jumping between revisions (as opposed to merging),
1682 # If we're jumping between revisions (as opposed to merging),
1647 # and if neither the working directory nor the target rev has
1683 # and if neither the working directory nor the target rev has
1648 # the file, then we need to remove it from the dirstate, to
1684 # the file, then we need to remove it from the dirstate, to
1649 # prevent the dirstate from listing the file when it is no
1685 # prevent the dirstate from listing the file when it is no
1650 # longer in the manifest.
1686 # longer in the manifest.
1651 if moddirstate and linear_path and f not in m2:
1687 if moddirstate and linear_path and f not in m2:
1652 self.dirstate.forget((f,))
1688 self.dirstate.forget((f,))
1653
1689
1654 # Compare manifests
1690 # Compare manifests
1655 for f, n in mw.iteritems():
1691 for f, n in mw.iteritems():
1656 if choose and not choose(f):
1692 if choose and not choose(f):
1657 continue
1693 continue
1658 if f in m2:
1694 if f in m2:
1659 s = 0
1695 s = 0
1660
1696
1661 # is the wfile new since m1, and match m2?
1697 # is the wfile new since m1, and match m2?
1662 if f not in m1:
1698 if f not in m1:
1663 t1 = self.wread(f)
1699 t1 = self.wread(f)
1664 t2 = self.file(f).read(m2[f])
1700 t2 = self.file(f).read(m2[f])
1665 if cmp(t1, t2) == 0:
1701 if cmp(t1, t2) == 0:
1666 n = m2[f]
1702 n = m2[f]
1667 del t1, t2
1703 del t1, t2
1668
1704
1669 # are files different?
1705 # are files different?
1670 if n != m2[f]:
1706 if n != m2[f]:
1671 a = ma.get(f, nullid)
1707 a = ma.get(f, nullid)
1672 # are both different from the ancestor?
1708 # are both different from the ancestor?
1673 if n != a and m2[f] != a:
1709 if n != a and m2[f] != a:
1674 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1710 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1675 # merge executable bits
1711 # merge executable bits
1676 # "if we changed or they changed, change in merge"
1712 # "if we changed or they changed, change in merge"
1677 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1713 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1678 mode = ((a^b) | (a^c)) ^ a
1714 mode = ((a^b) | (a^c)) ^ a
1679 merge[f] = (m1.get(f, nullid), m2[f], mode)
1715 merge[f] = (m1.get(f, nullid), m2[f], mode)
1680 s = 1
1716 s = 1
1681 # are we clobbering?
1717 # are we clobbering?
1682 # is remote's version newer?
1718 # is remote's version newer?
1683 # or are we going back in time?
1719 # or are we going back in time?
1684 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1720 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1685 self.ui.debug(_(" remote %s is newer, get\n") % f)
1721 self.ui.debug(_(" remote %s is newer, get\n") % f)
1686 get[f] = m2[f]
1722 get[f] = m2[f]
1687 s = 1
1723 s = 1
1688 elif f in umap or f in added:
1724 elif f in umap or f in added:
1689 # this unknown file is the same as the checkout
1725 # this unknown file is the same as the checkout
1690 # we need to reset the dirstate if the file was added
1726 # we need to reset the dirstate if the file was added
1691 get[f] = m2[f]
1727 get[f] = m2[f]
1692
1728
1693 if not s and mfw[f] != mf2[f]:
1729 if not s and mfw[f] != mf2[f]:
1694 if force:
1730 if force:
1695 self.ui.debug(_(" updating permissions for %s\n") % f)
1731 self.ui.debug(_(" updating permissions for %s\n") % f)
1696 util.set_exec(self.wjoin(f), mf2[f])
1732 util.set_exec(self.wjoin(f), mf2[f])
1697 else:
1733 else:
1698 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1734 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1699 mode = ((a^b) | (a^c)) ^ a
1735 mode = ((a^b) | (a^c)) ^ a
1700 if mode != b:
1736 if mode != b:
1701 self.ui.debug(_(" updating permissions for %s\n")
1737 self.ui.debug(_(" updating permissions for %s\n")
1702 % f)
1738 % f)
1703 util.set_exec(self.wjoin(f), mode)
1739 util.set_exec(self.wjoin(f), mode)
1704 del m2[f]
1740 del m2[f]
1705 elif f in ma:
1741 elif f in ma:
1706 if n != ma[f]:
1742 if n != ma[f]:
1707 r = _("d")
1743 r = _("d")
1708 if not force and (linear_path or allow):
1744 if not force and (linear_path or allow):
1709 r = self.ui.prompt(
1745 r = self.ui.prompt(
1710 (_(" local changed %s which remote deleted\n") % f) +
1746 (_(" local changed %s which remote deleted\n") % f) +
1711 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1747 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1712 if r == _("d"):
1748 if r == _("d"):
1713 remove.append(f)
1749 remove.append(f)
1714 else:
1750 else:
1715 self.ui.debug(_("other deleted %s\n") % f)
1751 self.ui.debug(_("other deleted %s\n") % f)
1716 remove.append(f) # other deleted it
1752 remove.append(f) # other deleted it
1717 else:
1753 else:
1718 # file is created on branch or in working directory
1754 # file is created on branch or in working directory
1719 if force and f not in umap:
1755 if force and f not in umap:
1720 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1756 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1721 remove.append(f)
1757 remove.append(f)
1722 elif n == m1.get(f, nullid): # same as parent
1758 elif n == m1.get(f, nullid): # same as parent
1723 if p2 == pa: # going backwards?
1759 if p2 == pa: # going backwards?
1724 self.ui.debug(_("remote deleted %s\n") % f)
1760 self.ui.debug(_("remote deleted %s\n") % f)
1725 remove.append(f)
1761 remove.append(f)
1726 else:
1762 else:
1727 self.ui.debug(_("local modified %s, keeping\n") % f)
1763 self.ui.debug(_("local modified %s, keeping\n") % f)
1728 else:
1764 else:
1729 self.ui.debug(_("working dir created %s, keeping\n") % f)
1765 self.ui.debug(_("working dir created %s, keeping\n") % f)
1730
1766
1731 for f, n in m2.iteritems():
1767 for f, n in m2.iteritems():
1732 if choose and not choose(f):
1768 if choose and not choose(f):
1733 continue
1769 continue
1734 if f[0] == "/":
1770 if f[0] == "/":
1735 continue
1771 continue
1736 if f in ma and n != ma[f]:
1772 if f in ma and n != ma[f]:
1737 r = _("k")
1773 r = _("k")
1738 if not force and (linear_path or allow):
1774 if not force and (linear_path or allow):
1739 r = self.ui.prompt(
1775 r = self.ui.prompt(
1740 (_("remote changed %s which local deleted\n") % f) +
1776 (_("remote changed %s which local deleted\n") % f) +
1741 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1777 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1742 if r == _("k"):
1778 if r == _("k"):
1743 get[f] = n
1779 get[f] = n
1744 elif f not in ma:
1780 elif f not in ma:
1745 self.ui.debug(_("remote created %s\n") % f)
1781 self.ui.debug(_("remote created %s\n") % f)
1746 get[f] = n
1782 get[f] = n
1747 else:
1783 else:
1748 if force or p2 == pa: # going backwards?
1784 if force or p2 == pa: # going backwards?
1749 self.ui.debug(_("local deleted %s, recreating\n") % f)
1785 self.ui.debug(_("local deleted %s, recreating\n") % f)
1750 get[f] = n
1786 get[f] = n
1751 else:
1787 else:
1752 self.ui.debug(_("local deleted %s\n") % f)
1788 self.ui.debug(_("local deleted %s\n") % f)
1753
1789
1754 del mw, m1, m2, ma
1790 del mw, m1, m2, ma
1755
1791
1756 if force:
1792 if force:
1757 for f in merge:
1793 for f in merge:
1758 get[f] = merge[f][1]
1794 get[f] = merge[f][1]
1759 merge = {}
1795 merge = {}
1760
1796
1761 if linear_path or force:
1797 if linear_path or force:
1762 # we don't need to do any magic, just jump to the new rev
1798 # we don't need to do any magic, just jump to the new rev
1763 branch_merge = False
1799 branch_merge = False
1764 p1, p2 = p2, nullid
1800 p1, p2 = p2, nullid
1765 else:
1801 else:
1766 if not allow:
1802 if not allow:
1767 self.ui.status(_("this update spans a branch"
1803 self.ui.status(_("this update spans a branch"
1768 " affecting the following files:\n"))
1804 " affecting the following files:\n"))
1769 fl = merge.keys() + get.keys()
1805 fl = merge.keys() + get.keys()
1770 fl.sort()
1806 fl.sort()
1771 for f in fl:
1807 for f in fl:
1772 cf = ""
1808 cf = ""
1773 if f in merge:
1809 if f in merge:
1774 cf = _(" (resolve)")
1810 cf = _(" (resolve)")
1775 self.ui.status(" %s%s\n" % (f, cf))
1811 self.ui.status(" %s%s\n" % (f, cf))
1776 self.ui.warn(_("aborting update spanning branches!\n"))
1812 self.ui.warn(_("aborting update spanning branches!\n"))
1777 self.ui.status(_("(use 'hg merge' to merge across branches"
1813 self.ui.status(_("(use 'hg merge' to merge across branches"
1778 " or 'hg update -C' to lose changes)\n"))
1814 " or 'hg update -C' to lose changes)\n"))
1779 return 1
1815 return 1
1780 branch_merge = True
1816 branch_merge = True
1781
1817
1782 xp1 = hex(p1)
1818 xp1 = hex(p1)
1783 xp2 = hex(p2)
1819 xp2 = hex(p2)
1784 if p2 == nullid: xxp2 = ''
1820 if p2 == nullid: xxp2 = ''
1785 else: xxp2 = xp2
1821 else: xxp2 = xp2
1786
1822
1787 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1823 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1788
1824
1789 # get the files we don't need to change
1825 # get the files we don't need to change
1790 files = get.keys()
1826 files = get.keys()
1791 files.sort()
1827 files.sort()
1792 for f in files:
1828 for f in files:
1793 if f[0] == "/":
1829 if f[0] == "/":
1794 continue
1830 continue
1795 self.ui.note(_("getting %s\n") % f)
1831 self.ui.note(_("getting %s\n") % f)
1796 t = self.file(f).read(get[f])
1832 t = self.file(f).read(get[f])
1797 self.wwrite(f, t)
1833 self.wwrite(f, t)
1798 util.set_exec(self.wjoin(f), mf2[f])
1834 util.set_exec(self.wjoin(f), mf2[f])
1799 if moddirstate:
1835 if moddirstate:
1800 if branch_merge:
1836 if branch_merge:
1801 self.dirstate.update([f], 'n', st_mtime=-1)
1837 self.dirstate.update([f], 'n', st_mtime=-1)
1802 else:
1838 else:
1803 self.dirstate.update([f], 'n')
1839 self.dirstate.update([f], 'n')
1804
1840
1805 # merge the tricky bits
1841 # merge the tricky bits
1806 failedmerge = []
1842 failedmerge = []
1807 files = merge.keys()
1843 files = merge.keys()
1808 files.sort()
1844 files.sort()
1809 for f in files:
1845 for f in files:
1810 self.ui.status(_("merging %s\n") % f)
1846 self.ui.status(_("merging %s\n") % f)
1811 my, other, flag = merge[f]
1847 my, other, flag = merge[f]
1812 ret = self.merge3(f, my, other, xp1, xp2)
1848 ret = self.merge3(f, my, other, xp1, xp2)
1813 if ret:
1849 if ret:
1814 err = True
1850 err = True
1815 failedmerge.append(f)
1851 failedmerge.append(f)
1816 util.set_exec(self.wjoin(f), flag)
1852 util.set_exec(self.wjoin(f), flag)
1817 if moddirstate:
1853 if moddirstate:
1818 if branch_merge:
1854 if branch_merge:
1819 # We've done a branch merge, mark this file as merged
1855 # We've done a branch merge, mark this file as merged
1820 # so that we properly record the merger later
1856 # so that we properly record the merger later
1821 self.dirstate.update([f], 'm')
1857 self.dirstate.update([f], 'm')
1822 else:
1858 else:
1823 # We've update-merged a locally modified file, so
1859 # We've update-merged a locally modified file, so
1824 # we set the dirstate to emulate a normal checkout
1860 # we set the dirstate to emulate a normal checkout
1825 # of that file some time in the past. Thus our
1861 # of that file some time in the past. Thus our
1826 # merge will appear as a normal local file
1862 # merge will appear as a normal local file
1827 # modification.
1863 # modification.
1828 f_len = len(self.file(f).read(other))
1864 f_len = len(self.file(f).read(other))
1829 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1865 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1830
1866
1831 remove.sort()
1867 remove.sort()
1832 for f in remove:
1868 for f in remove:
1833 self.ui.note(_("removing %s\n") % f)
1869 self.ui.note(_("removing %s\n") % f)
1834 util.audit_path(f)
1870 util.audit_path(f)
1835 try:
1871 try:
1836 util.unlink(self.wjoin(f))
1872 util.unlink(self.wjoin(f))
1837 except OSError, inst:
1873 except OSError, inst:
1838 if inst.errno != errno.ENOENT:
1874 if inst.errno != errno.ENOENT:
1839 self.ui.warn(_("update failed to remove %s: %s!\n") %
1875 self.ui.warn(_("update failed to remove %s: %s!\n") %
1840 (f, inst.strerror))
1876 (f, inst.strerror))
1841 if moddirstate:
1877 if moddirstate:
1842 if branch_merge:
1878 if branch_merge:
1843 self.dirstate.update(remove, 'r')
1879 self.dirstate.update(remove, 'r')
1844 else:
1880 else:
1845 self.dirstate.forget(remove)
1881 self.dirstate.forget(remove)
1846
1882
1847 if moddirstate:
1883 if moddirstate:
1848 self.dirstate.setparents(p1, p2)
1884 self.dirstate.setparents(p1, p2)
1849
1885
1850 if show_stats:
1886 if show_stats:
1851 stats = ((len(get), _("updated")),
1887 stats = ((len(get), _("updated")),
1852 (len(merge) - len(failedmerge), _("merged")),
1888 (len(merge) - len(failedmerge), _("merged")),
1853 (len(remove), _("removed")),
1889 (len(remove), _("removed")),
1854 (len(failedmerge), _("unresolved")))
1890 (len(failedmerge), _("unresolved")))
1855 note = ", ".join([_("%d files %s") % s for s in stats])
1891 note = ", ".join([_("%d files %s") % s for s in stats])
1856 self.ui.status("%s\n" % note)
1892 self.ui.status("%s\n" % note)
1857 if moddirstate:
1893 if moddirstate:
1858 if branch_merge:
1894 if branch_merge:
1859 if failedmerge:
1895 if failedmerge:
1860 self.ui.status(_("There are unresolved merges,"
1896 self.ui.status(_("There are unresolved merges,"
1861 " you can redo the full merge using:\n"
1897 " you can redo the full merge using:\n"
1862 " hg update -C %s\n"
1898 " hg update -C %s\n"
1863 " hg merge %s\n"
1899 " hg merge %s\n"
1864 % (self.changelog.rev(p1),
1900 % (self.changelog.rev(p1),
1865 self.changelog.rev(p2))))
1901 self.changelog.rev(p2))))
1866 else:
1902 else:
1867 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1903 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1868 elif failedmerge:
1904 elif failedmerge:
1869 self.ui.status(_("There are unresolved merges with"
1905 self.ui.status(_("There are unresolved merges with"
1870 " locally modified files.\n"))
1906 " locally modified files.\n"))
1871
1907
1872 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1908 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1873 return err
1909 return err
1874
1910
1875 def merge3(self, fn, my, other, p1, p2):
1911 def merge3(self, fn, my, other, p1, p2):
1876 """perform a 3-way merge in the working directory"""
1912 """perform a 3-way merge in the working directory"""
1877
1913
1878 def temp(prefix, node):
1914 def temp(prefix, node):
1879 pre = "%s~%s." % (os.path.basename(fn), prefix)
1915 pre = "%s~%s." % (os.path.basename(fn), prefix)
1880 (fd, name) = tempfile.mkstemp(prefix=pre)
1916 (fd, name) = tempfile.mkstemp(prefix=pre)
1881 f = os.fdopen(fd, "wb")
1917 f = os.fdopen(fd, "wb")
1882 self.wwrite(fn, fl.read(node), f)
1918 self.wwrite(fn, fl.read(node), f)
1883 f.close()
1919 f.close()
1884 return name
1920 return name
1885
1921
1886 fl = self.file(fn)
1922 fl = self.file(fn)
1887 base = fl.ancestor(my, other)
1923 base = fl.ancestor(my, other)
1888 a = self.wjoin(fn)
1924 a = self.wjoin(fn)
1889 b = temp("base", base)
1925 b = temp("base", base)
1890 c = temp("other", other)
1926 c = temp("other", other)
1891
1927
1892 self.ui.note(_("resolving %s\n") % fn)
1928 self.ui.note(_("resolving %s\n") % fn)
1893 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1929 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1894 (fn, short(my), short(other), short(base)))
1930 (fn, short(my), short(other), short(base)))
1895
1931
1896 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1932 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1897 or "hgmerge")
1933 or "hgmerge")
1898 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1934 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1899 environ={'HG_FILE': fn,
1935 environ={'HG_FILE': fn,
1900 'HG_MY_NODE': p1,
1936 'HG_MY_NODE': p1,
1901 'HG_OTHER_NODE': p2,
1937 'HG_OTHER_NODE': p2,
1902 'HG_FILE_MY_NODE': hex(my),
1938 'HG_FILE_MY_NODE': hex(my),
1903 'HG_FILE_OTHER_NODE': hex(other),
1939 'HG_FILE_OTHER_NODE': hex(other),
1904 'HG_FILE_BASE_NODE': hex(base)})
1940 'HG_FILE_BASE_NODE': hex(base)})
1905 if r:
1941 if r:
1906 self.ui.warn(_("merging %s failed!\n") % fn)
1942 self.ui.warn(_("merging %s failed!\n") % fn)
1907
1943
1908 os.unlink(b)
1944 os.unlink(b)
1909 os.unlink(c)
1945 os.unlink(c)
1910 return r
1946 return r
1911
1947
1912 def verify(self):
1948 def verify(self):
1913 filelinkrevs = {}
1949 filelinkrevs = {}
1914 filenodes = {}
1950 filenodes = {}
1915 changesets = revisions = files = 0
1951 changesets = revisions = files = 0
1916 errors = [0]
1952 errors = [0]
1917 warnings = [0]
1953 warnings = [0]
1918 neededmanifests = {}
1954 neededmanifests = {}
1919
1955
1920 def err(msg):
1956 def err(msg):
1921 self.ui.warn(msg + "\n")
1957 self.ui.warn(msg + "\n")
1922 errors[0] += 1
1958 errors[0] += 1
1923
1959
1924 def warn(msg):
1960 def warn(msg):
1925 self.ui.warn(msg + "\n")
1961 self.ui.warn(msg + "\n")
1926 warnings[0] += 1
1962 warnings[0] += 1
1927
1963
1928 def checksize(obj, name):
1964 def checksize(obj, name):
1929 d = obj.checksize()
1965 d = obj.checksize()
1930 if d[0]:
1966 if d[0]:
1931 err(_("%s data length off by %d bytes") % (name, d[0]))
1967 err(_("%s data length off by %d bytes") % (name, d[0]))
1932 if d[1]:
1968 if d[1]:
1933 err(_("%s index contains %d extra bytes") % (name, d[1]))
1969 err(_("%s index contains %d extra bytes") % (name, d[1]))
1934
1970
1935 def checkversion(obj, name):
1971 def checkversion(obj, name):
1936 if obj.version != revlog.REVLOGV0:
1972 if obj.version != revlog.REVLOGV0:
1937 if not revlogv1:
1973 if not revlogv1:
1938 warn(_("warning: `%s' uses revlog format 1") % name)
1974 warn(_("warning: `%s' uses revlog format 1") % name)
1939 elif revlogv1:
1975 elif revlogv1:
1940 warn(_("warning: `%s' uses revlog format 0") % name)
1976 warn(_("warning: `%s' uses revlog format 0") % name)
1941
1977
1942 revlogv1 = self.revlogversion != revlog.REVLOGV0
1978 revlogv1 = self.revlogversion != revlog.REVLOGV0
1943 if self.ui.verbose or revlogv1 != self.revlogv1:
1979 if self.ui.verbose or revlogv1 != self.revlogv1:
1944 self.ui.status(_("repository uses revlog format %d\n") %
1980 self.ui.status(_("repository uses revlog format %d\n") %
1945 (revlogv1 and 1 or 0))
1981 (revlogv1 and 1 or 0))
1946
1982
1947 seen = {}
1983 seen = {}
1948 self.ui.status(_("checking changesets\n"))
1984 self.ui.status(_("checking changesets\n"))
1949 checksize(self.changelog, "changelog")
1985 checksize(self.changelog, "changelog")
1950
1986
1951 for i in range(self.changelog.count()):
1987 for i in range(self.changelog.count()):
1952 changesets += 1
1988 changesets += 1
1953 n = self.changelog.node(i)
1989 n = self.changelog.node(i)
1954 l = self.changelog.linkrev(n)
1990 l = self.changelog.linkrev(n)
1955 if l != i:
1991 if l != i:
1956 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1992 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1957 if n in seen:
1993 if n in seen:
1958 err(_("duplicate changeset at revision %d") % i)
1994 err(_("duplicate changeset at revision %d") % i)
1959 seen[n] = 1
1995 seen[n] = 1
1960
1996
1961 for p in self.changelog.parents(n):
1997 for p in self.changelog.parents(n):
1962 if p not in self.changelog.nodemap:
1998 if p not in self.changelog.nodemap:
1963 err(_("changeset %s has unknown parent %s") %
1999 err(_("changeset %s has unknown parent %s") %
1964 (short(n), short(p)))
2000 (short(n), short(p)))
1965 try:
2001 try:
1966 changes = self.changelog.read(n)
2002 changes = self.changelog.read(n)
1967 except KeyboardInterrupt:
2003 except KeyboardInterrupt:
1968 self.ui.warn(_("interrupted"))
2004 self.ui.warn(_("interrupted"))
1969 raise
2005 raise
1970 except Exception, inst:
2006 except Exception, inst:
1971 err(_("unpacking changeset %s: %s") % (short(n), inst))
2007 err(_("unpacking changeset %s: %s") % (short(n), inst))
1972 continue
2008 continue
1973
2009
1974 neededmanifests[changes[0]] = n
2010 neededmanifests[changes[0]] = n
1975
2011
1976 for f in changes[3]:
2012 for f in changes[3]:
1977 filelinkrevs.setdefault(f, []).append(i)
2013 filelinkrevs.setdefault(f, []).append(i)
1978
2014
1979 seen = {}
2015 seen = {}
1980 self.ui.status(_("checking manifests\n"))
2016 self.ui.status(_("checking manifests\n"))
1981 checkversion(self.manifest, "manifest")
2017 checkversion(self.manifest, "manifest")
1982 checksize(self.manifest, "manifest")
2018 checksize(self.manifest, "manifest")
1983
2019
1984 for i in range(self.manifest.count()):
2020 for i in range(self.manifest.count()):
1985 n = self.manifest.node(i)
2021 n = self.manifest.node(i)
1986 l = self.manifest.linkrev(n)
2022 l = self.manifest.linkrev(n)
1987
2023
1988 if l < 0 or l >= self.changelog.count():
2024 if l < 0 or l >= self.changelog.count():
1989 err(_("bad manifest link (%d) at revision %d") % (l, i))
2025 err(_("bad manifest link (%d) at revision %d") % (l, i))
1990
2026
1991 if n in neededmanifests:
2027 if n in neededmanifests:
1992 del neededmanifests[n]
2028 del neededmanifests[n]
1993
2029
1994 if n in seen:
2030 if n in seen:
1995 err(_("duplicate manifest at revision %d") % i)
2031 err(_("duplicate manifest at revision %d") % i)
1996
2032
1997 seen[n] = 1
2033 seen[n] = 1
1998
2034
1999 for p in self.manifest.parents(n):
2035 for p in self.manifest.parents(n):
2000 if p not in self.manifest.nodemap:
2036 if p not in self.manifest.nodemap:
2001 err(_("manifest %s has unknown parent %s") %
2037 err(_("manifest %s has unknown parent %s") %
2002 (short(n), short(p)))
2038 (short(n), short(p)))
2003
2039
2004 try:
2040 try:
2005 delta = mdiff.patchtext(self.manifest.delta(n))
2041 delta = mdiff.patchtext(self.manifest.delta(n))
2006 except KeyboardInterrupt:
2042 except KeyboardInterrupt:
2007 self.ui.warn(_("interrupted"))
2043 self.ui.warn(_("interrupted"))
2008 raise
2044 raise
2009 except Exception, inst:
2045 except Exception, inst:
2010 err(_("unpacking manifest %s: %s") % (short(n), inst))
2046 err(_("unpacking manifest %s: %s") % (short(n), inst))
2011 continue
2047 continue
2012
2048
2013 try:
2049 try:
2014 ff = [ l.split('\0') for l in delta.splitlines() ]
2050 ff = [ l.split('\0') for l in delta.splitlines() ]
2015 for f, fn in ff:
2051 for f, fn in ff:
2016 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2052 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2017 except (ValueError, TypeError), inst:
2053 except (ValueError, TypeError), inst:
2018 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2054 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2019
2055
2020 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2056 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2021
2057
2022 for m, c in neededmanifests.items():
2058 for m, c in neededmanifests.items():
2023 err(_("Changeset %s refers to unknown manifest %s") %
2059 err(_("Changeset %s refers to unknown manifest %s") %
2024 (short(m), short(c)))
2060 (short(m), short(c)))
2025 del neededmanifests
2061 del neededmanifests
2026
2062
2027 for f in filenodes:
2063 for f in filenodes:
2028 if f not in filelinkrevs:
2064 if f not in filelinkrevs:
2029 err(_("file %s in manifest but not in changesets") % f)
2065 err(_("file %s in manifest but not in changesets") % f)
2030
2066
2031 for f in filelinkrevs:
2067 for f in filelinkrevs:
2032 if f not in filenodes:
2068 if f not in filenodes:
2033 err(_("file %s in changeset but not in manifest") % f)
2069 err(_("file %s in changeset but not in manifest") % f)
2034
2070
2035 self.ui.status(_("checking files\n"))
2071 self.ui.status(_("checking files\n"))
2036 ff = filenodes.keys()
2072 ff = filenodes.keys()
2037 ff.sort()
2073 ff.sort()
2038 for f in ff:
2074 for f in ff:
2039 if f == "/dev/null":
2075 if f == "/dev/null":
2040 continue
2076 continue
2041 files += 1
2077 files += 1
2042 if not f:
2078 if not f:
2043 err(_("file without name in manifest %s") % short(n))
2079 err(_("file without name in manifest %s") % short(n))
2044 continue
2080 continue
2045 fl = self.file(f)
2081 fl = self.file(f)
2046 checkversion(fl, f)
2082 checkversion(fl, f)
2047 checksize(fl, f)
2083 checksize(fl, f)
2048
2084
2049 nodes = {nullid: 1}
2085 nodes = {nullid: 1}
2050 seen = {}
2086 seen = {}
2051 for i in range(fl.count()):
2087 for i in range(fl.count()):
2052 revisions += 1
2088 revisions += 1
2053 n = fl.node(i)
2089 n = fl.node(i)
2054
2090
2055 if n in seen:
2091 if n in seen:
2056 err(_("%s: duplicate revision %d") % (f, i))
2092 err(_("%s: duplicate revision %d") % (f, i))
2057 if n not in filenodes[f]:
2093 if n not in filenodes[f]:
2058 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2094 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2059 else:
2095 else:
2060 del filenodes[f][n]
2096 del filenodes[f][n]
2061
2097
2062 flr = fl.linkrev(n)
2098 flr = fl.linkrev(n)
2063 if flr not in filelinkrevs.get(f, []):
2099 if flr not in filelinkrevs.get(f, []):
2064 err(_("%s:%s points to unexpected changeset %d")
2100 err(_("%s:%s points to unexpected changeset %d")
2065 % (f, short(n), flr))
2101 % (f, short(n), flr))
2066 else:
2102 else:
2067 filelinkrevs[f].remove(flr)
2103 filelinkrevs[f].remove(flr)
2068
2104
2069 # verify contents
2105 # verify contents
2070 try:
2106 try:
2071 t = fl.read(n)
2107 t = fl.read(n)
2072 except KeyboardInterrupt:
2108 except KeyboardInterrupt:
2073 self.ui.warn(_("interrupted"))
2109 self.ui.warn(_("interrupted"))
2074 raise
2110 raise
2075 except Exception, inst:
2111 except Exception, inst:
2076 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2112 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2077
2113
2078 # verify parents
2114 # verify parents
2079 (p1, p2) = fl.parents(n)
2115 (p1, p2) = fl.parents(n)
2080 if p1 not in nodes:
2116 if p1 not in nodes:
2081 err(_("file %s:%s unknown parent 1 %s") %
2117 err(_("file %s:%s unknown parent 1 %s") %
2082 (f, short(n), short(p1)))
2118 (f, short(n), short(p1)))
2083 if p2 not in nodes:
2119 if p2 not in nodes:
2084 err(_("file %s:%s unknown parent 2 %s") %
2120 err(_("file %s:%s unknown parent 2 %s") %
2085 (f, short(n), short(p1)))
2121 (f, short(n), short(p1)))
2086 nodes[n] = 1
2122 nodes[n] = 1
2087
2123
2088 # cross-check
2124 # cross-check
2089 for node in filenodes[f]:
2125 for node in filenodes[f]:
2090 err(_("node %s in manifests not in %s") % (hex(node), f))
2126 err(_("node %s in manifests not in %s") % (hex(node), f))
2091
2127
2092 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2128 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2093 (files, changesets, revisions))
2129 (files, changesets, revisions))
2094
2130
2095 if warnings[0]:
2131 if warnings[0]:
2096 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2132 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2097 if errors[0]:
2133 if errors[0]:
2098 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2134 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2099 return 1
2135 return 1
2100
2136
2101 # used to avoid circular references so destructors work
2137 # used to avoid circular references so destructors work
2102 def aftertrans(base):
2138 def aftertrans(base):
2103 p = base
2139 p = base
2104 def a():
2140 def a():
2105 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2141 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2106 util.rename(os.path.join(p, "journal.dirstate"),
2142 util.rename(os.path.join(p, "journal.dirstate"),
2107 os.path.join(p, "undo.dirstate"))
2143 os.path.join(p, "undo.dirstate"))
2108 return a
2144 return a
2109
2145
@@ -1,162 +1,187 b''
1 # sshrepo.py - ssh repository proxy class for mercurial
1 # sshrepo.py - ssh repository proxy class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from remoterepo import *
9 from remoterepo import *
10 from i18n import gettext as _
10 from i18n import gettext as _
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "hg os re stat util")
12 demandload(globals(), "hg os re stat util")
13
13
14 class sshrepository(remoterepository):
14 class sshrepository(remoterepository):
15 def __init__(self, ui, path):
15 def __init__(self, ui, path):
16 self.url = path
16 self.url = path
17 self.ui = ui
17 self.ui = ui
18
18
19 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
19 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
20 if not m:
20 if not m:
21 raise hg.RepoError(_("couldn't parse destination %s") % path)
21 raise hg.RepoError(_("couldn't parse destination %s") % path)
22
22
23 self.user = m.group(2)
23 self.user = m.group(2)
24 self.host = m.group(3)
24 self.host = m.group(3)
25 self.port = m.group(5)
25 self.port = m.group(5)
26 self.path = m.group(7) or "."
26 self.path = m.group(7) or "."
27
27
28 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
28 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
29 args = self.port and ("%s -p %s") % (args, self.port) or args
29 args = self.port and ("%s -p %s") % (args, self.port) or args
30
30
31 sshcmd = self.ui.config("ui", "ssh", "ssh")
31 sshcmd = self.ui.config("ui", "ssh", "ssh")
32 remotecmd = self.ui.config("ui", "remotecmd", "hg")
32 remotecmd = self.ui.config("ui", "remotecmd", "hg")
33 cmd = '%s %s "%s -R %s serve --stdio"'
33 cmd = '%s %s "%s -R %s serve --stdio"'
34 cmd = cmd % (sshcmd, args, remotecmd, self.path)
34 cmd = cmd % (sshcmd, args, remotecmd, self.path)
35
35
36 ui.note('running %s\n' % cmd)
36 ui.note('running %s\n' % cmd)
37 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
37 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
38
38
39 # skip any noise generated by remote shell
39 # skip any noise generated by remote shell
40 self.do_cmd("hello")
40 self.do_cmd("hello")
41 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
41 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
42 lines = ["", "dummy"]
42 lines = ["", "dummy"]
43 max_noise = 500
43 max_noise = 500
44 while lines[-1] and max_noise:
44 while lines[-1] and max_noise:
45 l = r.readline()
45 l = r.readline()
46 self.readerr()
46 self.readerr()
47 if lines[-1] == "1\n" and l == "\n":
47 if lines[-1] == "1\n" and l == "\n":
48 break
48 break
49 if l:
49 if l:
50 ui.debug(_("remote: "), l)
50 ui.debug(_("remote: "), l)
51 lines.append(l)
51 lines.append(l)
52 max_noise -= 1
52 max_noise -= 1
53 else:
53 else:
54 if l1:
54 if l1:
55 ui.debug(_("remote: "), l1)
55 ui.debug(_("remote: "), l1)
56 raise hg.RepoError(_("no response from remote hg"))
56 raise hg.RepoError(_("no response from remote hg"))
57
57
58 self.capabilities = ()
58 self.capabilities = ()
59 lines.reverse()
59 lines.reverse()
60 for l in lines:
60 for l in lines:
61 if l.startswith("capabilities:"):
61 if l.startswith("capabilities:"):
62 self.capabilities = l[:-1].split(":")[1].split()
62 self.capabilities = l[:-1].split(":")[1].split()
63 break
63 break
64
64
65 def readerr(self):
65 def readerr(self):
66 while 1:
66 while 1:
67 size = util.fstat(self.pipee).st_size
67 size = util.fstat(self.pipee).st_size
68 if size == 0: break
68 if size == 0: break
69 l = self.pipee.readline()
69 l = self.pipee.readline()
70 if not l: break
70 if not l: break
71 self.ui.status(_("remote: "), l)
71 self.ui.status(_("remote: "), l)
72
72
73 def __del__(self):
73 def __del__(self):
74 try:
74 try:
75 self.pipeo.close()
75 self.pipeo.close()
76 self.pipei.close()
76 self.pipei.close()
77 # read the error descriptor until EOF
77 # read the error descriptor until EOF
78 for l in self.pipee:
78 for l in self.pipee:
79 self.ui.status(_("remote: "), l)
79 self.ui.status(_("remote: "), l)
80 self.pipee.close()
80 self.pipee.close()
81 except:
81 except:
82 pass
82 pass
83
83
84 def dev(self):
84 def dev(self):
85 return -1
85 return -1
86
86
87 def do_cmd(self, cmd, **args):
87 def do_cmd(self, cmd, **args):
88 self.ui.debug(_("sending %s command\n") % cmd)
88 self.ui.debug(_("sending %s command\n") % cmd)
89 self.pipeo.write("%s\n" % cmd)
89 self.pipeo.write("%s\n" % cmd)
90 for k, v in args.items():
90 for k, v in args.items():
91 self.pipeo.write("%s %d\n" % (k, len(v)))
91 self.pipeo.write("%s %d\n" % (k, len(v)))
92 self.pipeo.write(v)
92 self.pipeo.write(v)
93 self.pipeo.flush()
93 self.pipeo.flush()
94
94
95 return self.pipei
95 return self.pipei
96
96
97 def call(self, cmd, **args):
97 def call(self, cmd, **args):
98 r = self.do_cmd(cmd, **args)
98 r = self.do_cmd(cmd, **args)
99 l = r.readline()
99 l = r.readline()
100 self.readerr()
100 self.readerr()
101 try:
101 try:
102 l = int(l)
102 l = int(l)
103 except:
103 except:
104 raise hg.RepoError(_("unexpected response '%s'") % l)
104 raise hg.RepoError(_("unexpected response '%s'") % l)
105 return r.read(l)
105 return r.read(l)
106
106
107 def lock(self):
107 def lock(self):
108 self.call("lock")
108 self.call("lock")
109 return remotelock(self)
109 return remotelock(self)
110
110
111 def unlock(self):
111 def unlock(self):
112 self.call("unlock")
112 self.call("unlock")
113
113
114 def heads(self):
114 def heads(self):
115 d = self.call("heads")
115 d = self.call("heads")
116 try:
116 try:
117 return map(bin, d[:-1].split(" "))
117 return map(bin, d[:-1].split(" "))
118 except:
118 except:
119 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
119 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
120
120
121 def branches(self, nodes):
121 def branches(self, nodes):
122 n = " ".join(map(hex, nodes))
122 n = " ".join(map(hex, nodes))
123 d = self.call("branches", nodes=n)
123 d = self.call("branches", nodes=n)
124 try:
124 try:
125 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
125 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
126 return br
126 return br
127 except:
127 except:
128 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
128 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
129
129
130 def between(self, pairs):
130 def between(self, pairs):
131 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
131 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
132 d = self.call("between", pairs=n)
132 d = self.call("between", pairs=n)
133 try:
133 try:
134 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
134 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
135 return p
135 return p
136 except:
136 except:
137 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
137 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
138
138
139 def changegroup(self, nodes, kind):
139 def changegroup(self, nodes, kind):
140 n = " ".join(map(hex, nodes))
140 n = " ".join(map(hex, nodes))
141 f = self.do_cmd("changegroup", roots=n)
141 f = self.do_cmd("changegroup", roots=n)
142 return self.pipei
142 return self.pipei
143
143
144 def unbundle(self, cg, heads, source):
145 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
146 if d:
147 raise hg.RepoError(_("push refused: %s") % d)
148
149 while 1:
150 d = cg.read(4096)
151 if not d: break
152 self.pipeo.write(str(len(d)) + '\n')
153 self.pipeo.write(d)
154 self.readerr()
155
156 self.pipeo.write('0\n')
157 self.pipeo.flush()
158
159 self.readerr()
160 d = self.pipei.readline()
161 if d != '\n':
162 return 1
163
164 l = int(self.pipei.readline())
165 r = self.pipei.read(l)
166 if not r:
167 return 1
168 return int(r)
169
144 def addchangegroup(self, cg, source):
170 def addchangegroup(self, cg, source):
145 d = self.call("addchangegroup")
171 d = self.call("addchangegroup")
146 if d:
172 if d:
147 raise hg.RepoError(_("push refused: %s"), d)
173 raise hg.RepoError(_("push refused: %s") % d)
148
149 while 1:
174 while 1:
150 d = cg.read(4096)
175 d = cg.read(4096)
151 if not d: break
176 if not d: break
152 self.pipeo.write(d)
177 self.pipeo.write(d)
153 self.readerr()
178 self.readerr()
154
179
155 self.pipeo.flush()
180 self.pipeo.flush()
156
181
157 self.readerr()
182 self.readerr()
158 l = int(self.pipei.readline())
183 l = int(self.pipei.readline())
159 r = self.pipei.read(l)
184 r = self.pipei.read(l)
160 if not r:
185 if not r:
161 return 1
186 return 1
162 return int(r)
187 return int(r)
@@ -1,113 +1,169 b''
1 # sshserver.py - ssh protocol server support for mercurial
1 # sshserver.py - ssh protocol server support for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from demandload import demandload
8 from demandload import demandload
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from node import *
10 from node import *
11 demandload(globals(), "sys util")
11 demandload(globals(), "os sys tempfile util")
12
12
13 class sshserver(object):
13 class sshserver(object):
14 def __init__(self, ui, repo):
14 def __init__(self, ui, repo):
15 self.ui = ui
15 self.ui = ui
16 self.repo = repo
16 self.repo = repo
17 self.lock = None
17 self.lock = None
18 self.fin = sys.stdin
18 self.fin = sys.stdin
19 self.fout = sys.stdout
19 self.fout = sys.stdout
20
20
21 sys.stdout = sys.stderr
21 sys.stdout = sys.stderr
22
22
23 # Prevent insertion/deletion of CRs
23 # Prevent insertion/deletion of CRs
24 util.set_binary(self.fin)
24 util.set_binary(self.fin)
25 util.set_binary(self.fout)
25 util.set_binary(self.fout)
26
26
27 def getarg(self):
27 def getarg(self):
28 argline = self.fin.readline()[:-1]
28 argline = self.fin.readline()[:-1]
29 arg, l = argline.split()
29 arg, l = argline.split()
30 val = self.fin.read(int(l))
30 val = self.fin.read(int(l))
31 return arg, val
31 return arg, val
32
32
33 def respond(self, v):
33 def respond(self, v):
34 self.fout.write("%d\n" % len(v))
34 self.fout.write("%d\n" % len(v))
35 self.fout.write(v)
35 self.fout.write(v)
36 self.fout.flush()
36 self.fout.flush()
37
37
38 def serve_forever(self):
38 def serve_forever(self):
39 while self.serve_one(): pass
39 while self.serve_one(): pass
40 sys.exit(0)
40 sys.exit(0)
41
41
42 def serve_one(self):
42 def serve_one(self):
43 cmd = self.fin.readline()[:-1]
43 cmd = self.fin.readline()[:-1]
44 if cmd:
44 if cmd:
45 impl = getattr(self, 'do_' + cmd, None)
45 impl = getattr(self, 'do_' + cmd, None)
46 if impl: impl()
46 if impl: impl()
47 else: self.respond("")
47 else: self.respond("")
48 return cmd != ''
48 return cmd != ''
49
49
50 def do_heads(self):
50 def do_heads(self):
51 h = self.repo.heads()
51 h = self.repo.heads()
52 self.respond(" ".join(map(hex, h)) + "\n")
52 self.respond(" ".join(map(hex, h)) + "\n")
53
53
54 def do_hello(self):
54 def do_hello(self):
55 '''the hello command returns a set of lines describing various
55 '''the hello command returns a set of lines describing various
56 interesting things about the server, in an RFC822-like format.
56 interesting things about the server, in an RFC822-like format.
57 Currently the only one defined is "capabilities", which
57 Currently the only one defined is "capabilities", which
58 consists of a line in the form:
58 consists of a line in the form:
59
59
60 capabilities: space separated list of tokens
60 capabilities: space separated list of tokens
61 '''
61 '''
62
62
63 r = "capabilities:\n"
63 r = "capabilities: unbundle\n"
64 self.respond(r)
64 self.respond(r)
65
65
66 def do_lock(self):
66 def do_lock(self):
67 '''DEPRECATED - allowing remote client to lock repo is not safe'''
68
67 self.lock = self.repo.lock()
69 self.lock = self.repo.lock()
68 self.respond("")
70 self.respond("")
69
71
70 def do_unlock(self):
72 def do_unlock(self):
73 '''DEPRECATED'''
74
71 if self.lock:
75 if self.lock:
72 self.lock.release()
76 self.lock.release()
73 self.lock = None
77 self.lock = None
74 self.respond("")
78 self.respond("")
75
79
76 def do_branches(self):
80 def do_branches(self):
77 arg, nodes = self.getarg()
81 arg, nodes = self.getarg()
78 nodes = map(bin, nodes.split(" "))
82 nodes = map(bin, nodes.split(" "))
79 r = []
83 r = []
80 for b in self.repo.branches(nodes):
84 for b in self.repo.branches(nodes):
81 r.append(" ".join(map(hex, b)) + "\n")
85 r.append(" ".join(map(hex, b)) + "\n")
82 self.respond("".join(r))
86 self.respond("".join(r))
83
87
84 def do_between(self):
88 def do_between(self):
85 arg, pairs = self.getarg()
89 arg, pairs = self.getarg()
86 pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
90 pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
87 r = []
91 r = []
88 for b in self.repo.between(pairs):
92 for b in self.repo.between(pairs):
89 r.append(" ".join(map(hex, b)) + "\n")
93 r.append(" ".join(map(hex, b)) + "\n")
90 self.respond("".join(r))
94 self.respond("".join(r))
91
95
92 def do_changegroup(self):
96 def do_changegroup(self):
93 nodes = []
97 nodes = []
94 arg, roots = self.getarg()
98 arg, roots = self.getarg()
95 nodes = map(bin, roots.split(" "))
99 nodes = map(bin, roots.split(" "))
96
100
97 cg = self.repo.changegroup(nodes, 'serve')
101 cg = self.repo.changegroup(nodes, 'serve')
98 while True:
102 while True:
99 d = cg.read(4096)
103 d = cg.read(4096)
100 if not d:
104 if not d:
101 break
105 break
102 self.fout.write(d)
106 self.fout.write(d)
103
107
104 self.fout.flush()
108 self.fout.flush()
105
109
106 def do_addchangegroup(self):
110 def do_addchangegroup(self):
111 '''DEPRECATED'''
112
107 if not self.lock:
113 if not self.lock:
108 self.respond("not locked")
114 self.respond("not locked")
109 return
115 return
110
116
111 self.respond("")
117 self.respond("")
112 r = self.repo.addchangegroup(self.fin, 'serve')
118 r = self.repo.addchangegroup(self.fin, 'serve')
113 self.respond(str(r))
119 self.respond(str(r))
120
121 def do_unbundle(self):
122 their_heads = self.getarg()[1].split()
123
124 def check_heads():
125 heads = map(hex, self.repo.heads())
126 return their_heads == [hex('force')] or their_heads == heads
127
128 # fail early if possible
129 if not check_heads():
130 self.respond(_('unsynced changes'))
131 return
132
133 self.respond('')
134
135 # write bundle data to temporary file because it can be big
136
137 try:
138 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
139 fp = os.fdopen(fd, 'wb+')
140
141 count = int(self.fin.readline())
142 while count:
143 fp.write(self.fin.read(count))
144 count = int(self.fin.readline())
145
146 was_locked = self.lock is not None
147 if not was_locked:
148 self.lock = self.repo.lock()
149 try:
150 if not check_heads():
151 # someone else committed/pushed/unbundled while we
152 # were transferring data
153 self.respond(_('unsynced changes'))
154 return
155 self.respond('')
156
157 # push can proceed
158
159 fp.seek(0)
160 r = self.repo.addchangegroup(fp, 'serve')
161 self.respond(str(r))
162 finally:
163 if not was_locked:
164 self.lock.release()
165 self.lock = None
166 finally:
167 fp.close()
168 os.unlink(tempname)
169
@@ -1,70 +1,83 b''
#!/bin/sh

# This test tries to exercise the ssh functionality with a dummy script

cat <<'EOF' > dummyssh
#!/bin/sh
# this attempts to deal with relative pathnames
cd `dirname $0`

# check for proper args
if [ $1 != "user@dummy" ] ; then
	exit -1
fi

# check that we're in the right directory
if [ ! -x dummyssh ] ; then
	exit -1
fi

echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
$2
EOF
chmod +x dummyssh

echo "# creating 'remote'"
hg init remote
cd remote
echo this > foo
hg ci -A -m "init" -d "1000000 0" foo

cd ..

echo "# clone remote"
hg clone -e ./dummyssh ssh://user@dummy/remote local

echo "# verify"
cd local
hg verify

echo "# empty default pull"
hg paths
hg pull -e ../dummyssh

echo "# local change"
echo bleah > foo
hg ci -m "add" -d "1000000 0"

echo "# updating rc"
echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
echo "[ui]" >> .hg/hgrc
echo "ssh = ../dummyssh" >> .hg/hgrc

echo "# find outgoing"
hg out ssh://user@dummy/remote

echo "# find incoming on the remote side"
hg incoming -R ../remote -e ../dummyssh ssh://user@dummy/local

echo "# push"
hg push

cd ../remote

echo "# check remote tip"
hg tip
hg verify
hg cat foo

# create a remote commit so the next local push is unsynced
echo z > z
hg ci -A -m z -d '1000001 0' z

cd ../local
echo r > r
hg ci -A -m z -d '1000002 0' r

echo "# push should fail"
hg push

echo "# push should succeed"
hg push -f

cd ..
cat dummylog
@@ -1,62 +1,76 b''
1 # creating 'remote'
1 # creating 'remote'
2 # clone remote
2 # clone remote
3 requesting all changes
3 requesting all changes
4 adding changesets
4 adding changesets
5 adding manifests
5 adding manifests
6 adding file changes
6 adding file changes
7 added 1 changesets with 1 changes to 1 files
7 added 1 changesets with 1 changes to 1 files
8 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
9 # verify
9 # verify
10 checking changesets
10 checking changesets
11 checking manifests
11 checking manifests
12 crosschecking files in changesets and manifests
12 crosschecking files in changesets and manifests
13 checking files
13 checking files
14 1 files, 1 changesets, 1 total revisions
14 1 files, 1 changesets, 1 total revisions
15 # empty default pull
15 # empty default pull
16 default = ssh://user@dummy/remote
16 default = ssh://user@dummy/remote
17 pulling from ssh://user@dummy/remote
17 pulling from ssh://user@dummy/remote
18 searching for changes
18 searching for changes
19 no changes found
19 no changes found
20 # local change
20 # local change
21 # updating rc
21 # updating rc
22 # find outgoing
22 # find outgoing
23 searching for changes
23 searching for changes
24 changeset: 1:c54836a570be
24 changeset: 1:c54836a570be
25 tag: tip
25 tag: tip
26 user: test
26 user: test
27 date: Mon Jan 12 13:46:40 1970 +0000
27 date: Mon Jan 12 13:46:40 1970 +0000
28 summary: add
28 summary: add
29
29
30 # find incoming on the remote side
30 # find incoming on the remote side
31 searching for changes
31 searching for changes
32 changeset: 1:c54836a570be
32 changeset: 1:c54836a570be
33 tag: tip
33 tag: tip
34 user: test
34 user: test
35 date: Mon Jan 12 13:46:40 1970 +0000
35 date: Mon Jan 12 13:46:40 1970 +0000
36 summary: add
36 summary: add
37
37
38 # push
38 # push
39 pushing to ssh://user@dummy/remote
39 pushing to ssh://user@dummy/remote
40 searching for changes
40 searching for changes
41 remote: adding changesets
41 remote: adding changesets
42 remote: adding manifests
42 remote: adding manifests
43 remote: adding file changes
43 remote: adding file changes
44 remote: added 1 changesets with 1 changes to 1 files
44 remote: added 1 changesets with 1 changes to 1 files
45 # check remote tip
45 # check remote tip
46 changeset: 1:c54836a570be
46 changeset: 1:c54836a570be
47 tag: tip
47 tag: tip
48 user: test
48 user: test
49 date: Mon Jan 12 13:46:40 1970 +0000
49 date: Mon Jan 12 13:46:40 1970 +0000
50 summary: add
50 summary: add
51
51
52 checking changesets
52 checking changesets
53 checking manifests
53 checking manifests
54 crosschecking files in changesets and manifests
54 crosschecking files in changesets and manifests
55 checking files
55 checking files
56 1 files, 2 changesets, 2 total revisions
56 1 files, 2 changesets, 2 total revisions
57 bleah
57 bleah
58 # push should fail
59 pushing to ssh://user@dummy/remote
60 searching for changes
61 abort: unsynced remote changes!
62 (did you forget to sync? use push -f to force)
63 # push should succeed
64 pushing to ssh://user@dummy/remote
65 searching for changes
66 remote: adding changesets
67 remote: adding manifests
68 remote: adding file changes
69 remote: added 1 changesets with 1 changes to 1 files
58 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
70 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
59 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
71 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
60 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
72 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
61 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
73 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
62 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
74 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
75 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
76 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
General Comments 0
You need to be logged in to leave comments. Login now