extend network protocol to stop clients from locking servers...
Vadim Gelfer
r2439:e8c4f3d3 default
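Reviewer note: the core of this changeset is a capability handshake. Repository objects now expose a `capabilities` sequence, and `push()` (added in localrepo.py below) consults it to pick between the old client-locks-server path and the new `unbundle` path, where the server applies the bundle under its own lock. A minimal sketch of the dispatch, mirroring the code added below:

    # sketch only; this mirrors the push() added in localrepo.py below
    def push(self, remote, force=False, revs=None):
        if 'unbundle' in remote.capabilities:
            self.push_unbundle(remote, force, revs)        # server locks itself
        else:
            self.push_addchangegroup(remote, force, revs)  # client locks server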
@@ -1,238 +1,242 @@
1 1 # httprepo.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from remoterepo import *
10 10 from i18n import gettext as _
11 11 from demandload import *
12 12 demandload(globals(), "hg os urllib urllib2 urlparse zlib util httplib")
13 13 demandload(globals(), "keepalive")
14 14
15 15 class passwordmgr(urllib2.HTTPPasswordMgr):
16 16 def __init__(self, ui):
17 17 urllib2.HTTPPasswordMgr.__init__(self)
18 18 self.ui = ui
19 19
20 20 def find_user_password(self, realm, authuri):
21 21 authinfo = urllib2.HTTPPasswordMgr.find_user_password(
22 22 self, realm, authuri)
23 23 if authinfo != (None, None):
24 24 return authinfo
25 25
26 26 if not self.ui.interactive:
27 27 raise util.Abort(_('http authorization required'))
28 28
29 29 self.ui.write(_("http authorization required\n"))
30 30 self.ui.status(_("realm: %s\n") % realm)
31 31 user = self.ui.prompt(_("user:"), default=None)
32 32 passwd = self.ui.getpass()
33 33
34 34 self.add_password(realm, authuri, user, passwd)
35 35 return (user, passwd)
36 36
37 37 def netlocsplit(netloc):
38 38 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
39 39
40 40 a = netloc.find('@')
41 41 if a == -1:
42 42 user, passwd = None, None
43 43 else:
44 44 userpass, netloc = netloc[:a], netloc[a+1:]
45 45 c = userpass.find(':')
46 46 if c == -1:
47 47 user, passwd = urllib.unquote(userpass), None
48 48 else:
49 49 user = urllib.unquote(userpass[:c])
50 50 passwd = urllib.unquote(userpass[c+1:])
51 51 c = netloc.find(':')
52 52 if c == -1:
53 53 host, port = netloc, None
54 54 else:
55 55 host, port = netloc[:c], netloc[c+1:]
56 56 return host, port, user, passwd
57 57
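A quick round-trip example for the two helpers above (doctest-style, with the output I expect from the code as written):

    >>> netlocsplit('bob:secret@example.com:8000')
    ('example.com', '8000', 'bob', 'secret')
    >>> netlocunsplit('example.com', '8000', 'bob', 'secret')
    'bob:secret@example.com:8000'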
58 58 def netlocunsplit(host, port, user=None, passwd=None):
59 59 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
60 60 if port:
61 61 hostport = host + ':' + port
62 62 else:
63 63 hostport = host
64 64 if user:
65 65 if passwd:
66 66 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
67 67 else:
68 68 userpass = urllib.quote(user)
69 69 return userpass + '@' + hostport
70 70 return hostport
71 71
72 72 class httprepository(remoterepository):
73 73 def __init__(self, ui, path):
74 self.capabilities = ()
74 75 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
75 76 if query or frag:
76 77 raise util.Abort(_('unsupported URL component: "%s"') %
77 78 (query or frag))
78 79 if not urlpath: urlpath = '/'
79 80 host, port, user, passwd = netlocsplit(netloc)
80 81
81 82 # urllib cannot handle URLs with embedded user or passwd
82 83 self.url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
83 84 urlpath, '', ''))
84 85 self.ui = ui
85 86
86 87 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
87 88 proxyauthinfo = None
88 89 handler = keepalive.HTTPHandler()
89 90
90 91 if proxyurl:
91 92 # proxy can be proper url or host[:port]
92 93 if not (proxyurl.startswith('http:') or
93 94 proxyurl.startswith('https:')):
94 95 proxyurl = 'http://' + proxyurl + '/'
95 96 snpqf = urlparse.urlsplit(proxyurl)
96 97 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
97 98 hpup = netlocsplit(proxynetloc)
98 99
99 100 proxyhost, proxyport, proxyuser, proxypasswd = hpup
100 101 if not proxyuser:
101 102 proxyuser = ui.config("http_proxy", "user")
102 103 proxypasswd = ui.config("http_proxy", "passwd")
103 104
104 105 # see if we should use a proxy for this url
105 106 no_list = [ "localhost", "127.0.0.1" ]
106 107 no_list.extend([p.strip().lower() for
107 108 p in ui.config("http_proxy", "no", '').split(',')
108 109 if p.strip()])
109 110 no_list.extend([p.strip().lower() for
110 111 p in os.getenv("no_proxy", '').split(',')
111 112 if p.strip()])
112 113 # "http_proxy.always" config is for running tests on localhost
113 114 if (not ui.configbool("http_proxy", "always") and
114 115 host.lower() in no_list):
115 116 ui.debug(_('disabling proxy for %s\n') % host)
116 117 else:
117 118 proxyurl = urlparse.urlunsplit((
118 119 proxyscheme, netlocunsplit(proxyhost, proxyport,
119 120 proxyuser, proxypasswd or ''),
120 121 proxypath, proxyquery, proxyfrag))
121 122 handler = urllib2.ProxyHandler({scheme: proxyurl})
122 123 ui.debug(_('proxying through %s\n') % proxyurl)
123 124
124 125 # urllib2 takes proxy values from the environment and those
125 126 # will take precedence if found, so drop them
126 127 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
127 128 try:
128 129 if os.environ.has_key(env):
129 130 del os.environ[env]
130 131 except OSError:
131 132 pass
132 133
133 134 passmgr = passwordmgr(ui)
134 135 if user:
135 136 ui.debug(_('will use user %s for http auth\n') % user)
136 137 passmgr.add_password(None, host, user, passwd or '')
137 138
138 139 opener = urllib2.build_opener(
139 140 handler,
140 141 urllib2.HTTPBasicAuthHandler(passmgr),
141 142 urllib2.HTTPDigestAuthHandler(passmgr))
142 143
143 144 # 1.0 here is the _protocol_ version
144 145 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
145 146 urllib2.install_opener(opener)
146 147
147 148 def dev(self):
148 149 return -1
149 150
150 151 def lock(self):
151 152 raise util.Abort(_('operation not supported over http'))
152 153
153 154 def do_cmd(self, cmd, **args):
154 155 self.ui.debug(_("sending %s command\n") % cmd)
155 156 q = {"cmd": cmd}
156 157 q.update(args)
157 158 qs = urllib.urlencode(q)
158 159 cu = "%s?%s" % (self.url, qs)
159 160 try:
160 161 resp = urllib2.urlopen(cu)
161 162 except httplib.HTTPException, inst:
162 163 self.ui.debug(_('http error while sending %s command\n') % cmd)
163 164 self.ui.print_exc()
164 165 raise IOError(None, inst)
165 166 try:
166 167 proto = resp.getheader('content-type')
167 168 except AttributeError:
168 169 proto = resp.headers['content-type']
169 170
170 171 # accept old "text/plain" and "application/hg-changegroup" for now
171 172 if not proto.startswith('application/mercurial') and \
172 173 not proto.startswith('text/plain') and \
173 174 not proto.startswith('application/hg-changegroup'):
174 175 raise hg.RepoError(_("'%s' does not appear to be an hg repository") %
175 176 self.url)
176 177
177 178 if proto.startswith('application/mercurial'):
178 179 version = proto[22:]
179 180 if float(version) > 0.1:
180 181 raise hg.RepoError(_("'%s' uses newer protocol %s") %
181 182 (self.url, version))
182 183
183 184 return resp
184 185
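For reference, the request do_cmd builds is nothing more than the repo URL with the command and its arguments urlencoded onto it; the reply must come back with an application/mercurial-0.1 content-type. For example (a list of pairs keeps the ordering deterministic, unlike the dict do_cmd uses):

    >>> import urllib
    >>> 'http://example.com/repo?' + urllib.urlencode(
    ...     [('cmd', 'between'), ('pairs', 'deadbeef-00000000')])
    'http://example.com/repo?cmd=between&pairs=deadbeef-00000000'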
185 186 def do_read(self, cmd, **args):
186 187 fp = self.do_cmd(cmd, **args)
187 188 try:
188 189 return fp.read()
189 190 finally:
190 191 # if using keepalive, allow connection to be reused
191 192 fp.close()
192 193
193 194 def heads(self):
194 195 d = self.do_read("heads")
195 196 try:
196 197 return map(bin, d[:-1].split(" "))
197 198 except:
198 199 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
199 200 raise
200 201
201 202 def branches(self, nodes):
202 203 n = " ".join(map(hex, nodes))
203 204 d = self.do_read("branches", nodes=n)
204 205 try:
205 206 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
206 207 return br
207 208 except:
208 209 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
209 210 raise
210 211
211 212 def between(self, pairs):
212 213 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
213 214 d = self.do_read("between", pairs=n)
214 215 try:
215 216 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
216 217 return p
217 218 except:
218 219 self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n")
219 220 raise
220 221
221 222 def changegroup(self, nodes, kind):
222 223 n = " ".join(map(hex, nodes))
223 224 f = self.do_cmd("changegroup", roots=n)
224 225 bytes = 0
225 226
226 227 def zgenerator(f):
227 228 zd = zlib.decompressobj()
228 229 try:
229 230 for chnk in f:
230 231 yield zd.decompress(chnk)
231 232 except httplib.HTTPException, inst:
232 233 raise IOError(None, _('connection ended unexpectedly'))
233 234 yield zd.flush()
234 235
235 236 return util.chunkbuffer(zgenerator(util.filechunkiter(f)))
236 237
238 def unbundle(self, cg, heads, source):
239 raise util.Abort(_('operation not supported over http'))
240
237 241 class httpsrepository(httprepository):
238 242 pass
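Note that in this revision the HTTP client reports an empty `capabilities` tuple and its `unbundle` still aborts, so HTTP pushes fail fast instead of trying to lock the server. A hypothetical proxy that opts in to the new path would only need the following (names below are illustrative, not from this changeset):

    class dummyremote(remoterepository):
        capabilities = ('unbundle',)       # advertise the new protocol

        def unbundle(self, cg, heads, source):
            # ship the changegroup; the server applies it under its own
            # short-lived lock and aborts if its heads moved (push race)
            raise NotImplementedError('transport-specific')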
@@ -1,2109 +1,2145 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog")
16 16
17 17 class localrepository(object):
18 capabilities = ()
19
18 20 def __del__(self):
19 21 self.transhandle = None
20 22 def __init__(self, parentui, path=None, create=0):
21 23 if not path:
22 24 p = os.getcwd()
23 25 while not os.path.isdir(os.path.join(p, ".hg")):
24 26 oldp = p
25 27 p = os.path.dirname(p)
26 28 if p == oldp:
27 29 raise repo.RepoError(_("no repo found"))
28 30 path = p
29 31 self.path = os.path.join(path, ".hg")
30 32
31 33 if not create and not os.path.isdir(self.path):
32 34 raise repo.RepoError(_("repository %s not found") % path)
33 35
34 36 self.root = os.path.abspath(path)
35 37 self.origroot = path
36 38 self.ui = ui.ui(parentui=parentui)
37 39 self.opener = util.opener(self.path)
38 40 self.wopener = util.opener(self.root)
39 41
40 42 try:
41 43 self.ui.readconfig(self.join("hgrc"), self.root)
42 44 except IOError:
43 45 pass
44 46
45 47 v = self.ui.revlogopts
46 48 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
47 49 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
48 50 fl = v.get('flags', None)
49 51 flags = 0
50 52 if fl != None:
51 53 for x in fl.split():
52 54 flags |= revlog.flagstr(x)
53 55 elif self.revlogv1:
54 56 flags = revlog.REVLOG_DEFAULT_FLAGS
55 57
56 58 v = self.revlogversion | flags
57 59 self.manifest = manifest.manifest(self.opener, v)
58 60 self.changelog = changelog.changelog(self.opener, v)
59 61
60 62 # the changelog might not have the inline index flag
61 63 # on. If the format of the changelog is the same as found in
62 64 # .hgrc, apply any flags found in the .hgrc as well.
63 65 # Otherwise, just version from the changelog
64 66 v = self.changelog.version
65 67 if v == self.revlogversion:
66 68 v |= flags
67 69 self.revlogversion = v
68 70
69 71 self.tagscache = None
70 72 self.nodetagscache = None
71 73 self.encodepats = None
72 74 self.decodepats = None
73 75 self.transhandle = None
74 76
75 77 if create:
76 78 os.mkdir(self.path)
77 79 os.mkdir(self.join("data"))
78 80
79 81 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80 82
81 83 def hook(self, name, throw=False, **args):
82 84 def callhook(hname, funcname):
83 85 '''call python hook. hook is callable object, looked up as
84 86 name in python module. if callable returns "true", hook
85 87 fails, else passes. if hook raises exception, treated as
86 88 hook failure. exception propagates if throw is "true".
87 89
88 90 reason for "true" meaning "hook failed" is so that
89 91 unmodified commands (e.g. mercurial.commands.update) can
90 92 be run as hooks without wrappers to convert return values.'''
91 93
92 94 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
93 95 d = funcname.rfind('.')
94 96 if d == -1:
95 97 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
96 98 % (hname, funcname))
97 99 modname = funcname[:d]
98 100 try:
99 101 obj = __import__(modname)
100 102 except ImportError:
101 103 raise util.Abort(_('%s hook is invalid '
102 104 '(import of "%s" failed)') %
103 105 (hname, modname))
104 106 try:
105 107 for p in funcname.split('.')[1:]:
106 108 obj = getattr(obj, p)
107 109 except AttributeError, err:
108 110 raise util.Abort(_('%s hook is invalid '
109 111 '("%s" is not defined)') %
110 112 (hname, funcname))
111 113 if not callable(obj):
112 114 raise util.Abort(_('%s hook is invalid '
113 115 '("%s" is not callable)') %
114 116 (hname, funcname))
115 117 try:
116 118 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
117 119 except (KeyboardInterrupt, util.SignalInterrupt):
118 120 raise
119 121 except Exception, exc:
120 122 if isinstance(exc, util.Abort):
121 123 self.ui.warn(_('error: %s hook failed: %s\n') %
122 124 (hname, exc.args[0] % exc.args[1:]))
123 125 else:
124 126 self.ui.warn(_('error: %s hook raised an exception: '
125 127 '%s\n') % (hname, exc))
126 128 if throw:
127 129 raise
128 130 self.ui.print_exc()
129 131 return True
130 132 if r:
131 133 if throw:
132 134 raise util.Abort(_('%s hook failed') % hname)
133 135 self.ui.warn(_('warning: %s hook failed\n') % hname)
134 136 return r
135 137
136 138 def runhook(name, cmd):
137 139 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
138 140 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
139 141 r = util.system(cmd, environ=env, cwd=self.root)
140 142 if r:
141 143 desc, r = util.explain_exit(r)
142 144 if throw:
143 145 raise util.Abort(_('%s hook %s') % (name, desc))
144 146 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
145 147 return r
146 148
147 149 r = False
148 150 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
149 151 if hname.split(".", 1)[0] == name and cmd]
150 152 hooks.sort()
151 153 for hname, cmd in hooks:
152 154 if cmd.startswith('python:'):
153 155 r = callhook(hname, cmd[7:].strip()) or r
154 156 else:
155 157 r = runhook(hname, cmd) or r
156 158 return r
157 159
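A configuration example for the hook dispatcher above, assuming a typical hgrc: shell hooks receive the keyword arguments as HG_* environment variables, while 'python:'-prefixed hooks resolve to a dotted callable invoked with ui/repo/hooktype keywords.

    # in .hg/hgrc (illustrative):
    #   [hooks]
    #   commit = echo committed $HG_NODE
    #   pretxncommit.check = python:myhooks.check
    #
    # myhooks.py -- returning a true value marks the hook as failed
    def check(ui, repo, hooktype, node=None, **kwargs):
        ui.status('checking %s\n' % node)
        return False   # success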
158 160 def tags(self):
159 161 '''return a mapping of tag to node'''
160 162 if not self.tagscache:
161 163 self.tagscache = {}
162 164
163 165 def parsetag(line, context):
164 166 if not line:
165 167 return
166 168 s = line.split(" ", 1)
167 169 if len(s) != 2:
168 170 self.ui.warn(_("%s: cannot parse entry\n") % context)
169 171 return
170 172 node, key = s
171 173 key = key.strip()
172 174 try:
173 175 bin_n = bin(node)
174 176 except TypeError:
175 177 self.ui.warn(_("%s: node '%s' is not well formed\n") %
176 178 (context, node))
177 179 return
178 180 if bin_n not in self.changelog.nodemap:
179 181 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
180 182 (context, key))
181 183 return
182 184 self.tagscache[key] = bin_n
183 185
184 186 # read the tags file from each head, ending with the tip,
185 187 # and add each tag found to the map, with "newer" ones
186 188 # taking precedence
187 189 heads = self.heads()
188 190 heads.reverse()
189 191 fl = self.file(".hgtags")
190 192 for node in heads:
191 193 change = self.changelog.read(node)
192 194 rev = self.changelog.rev(node)
193 195 fn, ff = self.manifest.find(change[0], '.hgtags')
194 196 if fn is None: continue
195 197 count = 0
196 198 for l in fl.read(fn).splitlines():
197 199 count += 1
198 200 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
199 201 (rev, short(node), count))
200 202 try:
201 203 f = self.opener("localtags")
202 204 count = 0
203 205 for l in f:
204 206 count += 1
205 207 parsetag(l, _("localtags, line %d") % count)
206 208 except IOError:
207 209 pass
208 210
209 211 self.tagscache['tip'] = self.changelog.tip()
210 212
211 213 return self.tagscache
212 214
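For context, each line parsetag consumes is a 40-digit hex changeset id followed by the tag name, which is how .hgtags and localtags store them (the node below is illustrative):

    line = '0123456789abcdef0123456789abcdef01234567 release-1.0'
    node, key = line.split(' ', 1)   # the same split parsetag performs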
213 215 def tagslist(self):
214 216 '''return a list of tags ordered by revision'''
215 217 l = []
216 218 for t, n in self.tags().items():
217 219 try:
218 220 r = self.changelog.rev(n)
219 221 except:
220 222 r = -2 # sort to the beginning of the list if unknown
221 223 l.append((r, t, n))
222 224 l.sort()
223 225 return [(t, n) for r, t, n in l]
224 226
225 227 def nodetags(self, node):
226 228 '''return the tags associated with a node'''
227 229 if not self.nodetagscache:
228 230 self.nodetagscache = {}
229 231 for t, n in self.tags().items():
230 232 self.nodetagscache.setdefault(n, []).append(t)
231 233 return self.nodetagscache.get(node, [])
232 234
233 235 def lookup(self, key):
234 236 try:
235 237 return self.tags()[key]
236 238 except KeyError:
237 239 try:
238 240 return self.changelog.lookup(key)
239 241 except:
240 242 raise repo.RepoError(_("unknown revision '%s'") % key)
241 243
242 244 def dev(self):
243 245 return os.stat(self.path).st_dev
244 246
245 247 def local(self):
246 248 return True
247 249
248 250 def join(self, f):
249 251 return os.path.join(self.path, f)
250 252
251 253 def wjoin(self, f):
252 254 return os.path.join(self.root, f)
253 255
254 256 def file(self, f):
255 257 if f[0] == '/':
256 258 f = f[1:]
257 259 return filelog.filelog(self.opener, f, self.revlogversion)
258 260
259 261 def getcwd(self):
260 262 return self.dirstate.getcwd()
261 263
262 264 def wfile(self, f, mode='r'):
263 265 return self.wopener(f, mode)
264 266
265 267 def wread(self, filename):
266 268 if self.encodepats == None:
267 269 l = []
268 270 for pat, cmd in self.ui.configitems("encode"):
269 271 mf = util.matcher(self.root, "", [pat], [], [])[1]
270 272 l.append((mf, cmd))
271 273 self.encodepats = l
272 274
273 275 data = self.wopener(filename, 'r').read()
274 276
275 277 for mf, cmd in self.encodepats:
276 278 if mf(filename):
277 279 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
278 280 data = util.filter(data, cmd)
279 281 break
280 282
281 283 return data
282 284
283 285 def wwrite(self, filename, data, fd=None):
284 286 if self.decodepats == None:
285 287 l = []
286 288 for pat, cmd in self.ui.configitems("decode"):
287 289 mf = util.matcher(self.root, "", [pat], [], [])[1]
288 290 l.append((mf, cmd))
289 291 self.decodepats = l
290 292
291 293 for mf, cmd in self.decodepats:
292 294 if mf(filename):
293 295 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
294 296 data = util.filter(data, cmd)
295 297 break
296 298
297 299 if fd:
298 300 return fd.write(data)
299 301 return self.wopener(filename, 'w').write(data)
300 302
301 303 def transaction(self):
302 304 tr = self.transhandle
303 305 if tr != None and tr.running():
304 306 return tr.nest()
305 307
306 308 # save dirstate for rollback
307 309 try:
308 310 ds = self.opener("dirstate").read()
309 311 except IOError:
310 312 ds = ""
311 313 self.opener("journal.dirstate", "w").write(ds)
312 314
313 315 tr = transaction.transaction(self.ui.warn, self.opener,
314 316 self.join("journal"),
315 317 aftertrans(self.path))
316 318 self.transhandle = tr
317 319 return tr
318 320
319 321 def recover(self):
320 322 l = self.lock()
321 323 if os.path.exists(self.join("journal")):
322 324 self.ui.status(_("rolling back interrupted transaction\n"))
323 325 transaction.rollback(self.opener, self.join("journal"))
324 326 self.reload()
325 327 return True
326 328 else:
327 329 self.ui.warn(_("no interrupted transaction available\n"))
328 330 return False
329 331
330 332 def rollback(self, wlock=None):
331 333 if not wlock:
332 334 wlock = self.wlock()
333 335 l = self.lock()
334 336 if os.path.exists(self.join("undo")):
335 337 self.ui.status(_("rolling back last transaction\n"))
336 338 transaction.rollback(self.opener, self.join("undo"))
337 339 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
338 340 self.reload()
339 341 self.wreload()
340 342 else:
341 343 self.ui.warn(_("no rollback information available\n"))
342 344
343 345 def wreload(self):
344 346 self.dirstate.read()
345 347
346 348 def reload(self):
347 349 self.changelog.load()
348 350 self.manifest.load()
349 351 self.tagscache = None
350 352 self.nodetagscache = None
351 353
352 354 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
353 355 desc=None):
354 356 try:
355 357 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
356 358 except lock.LockHeld, inst:
357 359 if not wait:
358 360 raise
359 361 self.ui.warn(_("waiting for lock on %s held by %s\n") %
360 362 (desc, inst.args[0]))
361 363 # default to 600 seconds timeout
362 364 l = lock.lock(self.join(lockname),
363 365 int(self.ui.config("ui", "timeout") or 600),
364 366 releasefn, desc=desc)
365 367 if acquirefn:
366 368 acquirefn()
367 369 return l
368 370
369 371 def lock(self, wait=1):
370 372 return self.do_lock("lock", wait, acquirefn=self.reload,
371 373 desc=_('repository %s') % self.origroot)
372 374
373 375 def wlock(self, wait=1):
374 376 return self.do_lock("wlock", wait, self.dirstate.write,
375 377 self.wreload,
376 378 desc=_('working directory of %s') % self.origroot)
377 379
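Usage sketch for the locking API above (hypothetical caller, exception style matching this codebase): a non-blocking attempt raises lock.LockHeld with the holder's description in args[0], while the default wait=1 retries for the configured [ui] timeout (600 seconds by default).

    try:
        l = repo.lock(wait=0)          # assuming repo is a localrepository
    except lock.LockHeld, inst:
        repo.ui.warn(_('cannot lock %s: held by %s\n')
                     % (repo.origroot, inst.args[0]))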
378 380 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
379 381 "determine whether a new filenode is needed"
380 382 fp1 = manifest1.get(filename, nullid)
381 383 fp2 = manifest2.get(filename, nullid)
382 384
383 385 if fp2 != nullid:
384 386 # is one parent an ancestor of the other?
385 387 fpa = filelog.ancestor(fp1, fp2)
386 388 if fpa == fp1:
387 389 fp1, fp2 = fp2, nullid
388 390 elif fpa == fp2:
389 391 fp2 = nullid
390 392
391 393 # is the file unmodified from the parent? report existing entry
392 394 if fp2 == nullid and text == filelog.read(fp1):
393 395 return (fp1, None, None)
394 396
395 397 return (None, fp1, fp2)
396 398
397 399 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
398 400 orig_parent = self.dirstate.parents()[0] or nullid
399 401 p1 = p1 or self.dirstate.parents()[0] or nullid
400 402 p2 = p2 or self.dirstate.parents()[1] or nullid
401 403 c1 = self.changelog.read(p1)
402 404 c2 = self.changelog.read(p2)
403 405 m1 = self.manifest.read(c1[0])
404 406 mf1 = self.manifest.readflags(c1[0])
405 407 m2 = self.manifest.read(c2[0])
406 408 changed = []
407 409
408 410 if orig_parent == p1:
409 411 update_dirstate = 1
410 412 else:
411 413 update_dirstate = 0
412 414
413 415 if not wlock:
414 416 wlock = self.wlock()
415 417 l = self.lock()
416 418 tr = self.transaction()
417 419 mm = m1.copy()
418 420 mfm = mf1.copy()
419 421 linkrev = self.changelog.count()
420 422 for f in files:
421 423 try:
422 424 t = self.wread(f)
423 425 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
424 426 r = self.file(f)
425 427 mfm[f] = tm
426 428
427 429 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
428 430 if entry:
429 431 mm[f] = entry
430 432 continue
431 433
432 434 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
433 435 changed.append(f)
434 436 if update_dirstate:
435 437 self.dirstate.update([f], "n")
436 438 except IOError:
437 439 try:
438 440 del mm[f]
439 441 del mfm[f]
440 442 if update_dirstate:
441 443 self.dirstate.forget([f])
442 444 except:
443 445 # deleted from p2?
444 446 pass
445 447
446 448 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
447 449 user = user or self.ui.username()
448 450 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
449 451 tr.close()
450 452 if update_dirstate:
451 453 self.dirstate.setparents(n, nullid)
452 454
453 455 def commit(self, files=None, text="", user=None, date=None,
454 456 match=util.always, force=False, lock=None, wlock=None,
455 457 force_editor=False):
456 458 commit = []
457 459 remove = []
458 460 changed = []
459 461
460 462 if files:
461 463 for f in files:
462 464 s = self.dirstate.state(f)
463 465 if s in 'nmai':
464 466 commit.append(f)
465 467 elif s == 'r':
466 468 remove.append(f)
467 469 else:
468 470 self.ui.warn(_("%s not tracked!\n") % f)
469 471 else:
470 472 modified, added, removed, deleted, unknown = self.changes(match=match)
471 473 commit = modified + added
472 474 remove = removed
473 475
474 476 p1, p2 = self.dirstate.parents()
475 477 c1 = self.changelog.read(p1)
476 478 c2 = self.changelog.read(p2)
477 479 m1 = self.manifest.read(c1[0])
478 480 mf1 = self.manifest.readflags(c1[0])
479 481 m2 = self.manifest.read(c2[0])
480 482
481 483 if not commit and not remove and not force and p2 == nullid:
482 484 self.ui.status(_("nothing changed\n"))
483 485 return None
484 486
485 487 xp1 = hex(p1)
486 488 if p2 == nullid: xp2 = ''
487 489 else: xp2 = hex(p2)
488 490
489 491 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
490 492
491 493 if not wlock:
492 494 wlock = self.wlock()
493 495 if not lock:
494 496 lock = self.lock()
495 497 tr = self.transaction()
496 498
497 499 # check in files
498 500 new = {}
499 501 linkrev = self.changelog.count()
500 502 commit.sort()
501 503 for f in commit:
502 504 self.ui.note(f + "\n")
503 505 try:
504 506 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
505 507 t = self.wread(f)
506 508 except IOError:
507 509 self.ui.warn(_("trouble committing %s!\n") % f)
508 510 raise
509 511
510 512 r = self.file(f)
511 513
512 514 meta = {}
513 515 cp = self.dirstate.copied(f)
514 516 if cp:
515 517 meta["copy"] = cp
516 518 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
517 519 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
518 520 fp1, fp2 = nullid, nullid
519 521 else:
520 522 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
521 523 if entry:
522 524 new[f] = entry
523 525 continue
524 526
525 527 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
526 528 # remember what we've added so that we can later calculate
527 529 # the files to pull from a set of changesets
528 530 changed.append(f)
529 531
530 532 # update manifest
531 533 m1 = m1.copy()
532 534 m1.update(new)
533 535 for f in remove:
534 536 if f in m1:
535 537 del m1[f]
536 538 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
537 539 (new, remove))
538 540
539 541 # add changeset
540 542 new = new.keys()
541 543 new.sort()
542 544
543 545 user = user or self.ui.username()
544 546 if not text or force_editor:
545 547 edittext = []
546 548 if text:
547 549 edittext.append(text)
548 550 edittext.append("")
549 551 if p2 != nullid:
550 552 edittext.append("HG: branch merge")
551 553 edittext.extend(["HG: changed %s" % f for f in changed])
552 554 edittext.extend(["HG: removed %s" % f for f in remove])
553 555 if not changed and not remove:
554 556 edittext.append("HG: no files changed")
555 557 edittext.append("")
556 558 # run editor in the repository root
557 559 olddir = os.getcwd()
558 560 os.chdir(self.root)
559 561 text = self.ui.edit("\n".join(edittext), user)
560 562 os.chdir(olddir)
561 563
562 564 lines = [line.rstrip() for line in text.rstrip().splitlines()]
563 565 while lines and not lines[0]:
564 566 del lines[0]
565 567 if not lines:
566 568 return None
567 569 text = '\n'.join(lines)
568 570 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
569 571 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
570 572 parent2=xp2)
571 573 tr.close()
572 574
573 575 self.dirstate.setparents(n)
574 576 self.dirstate.update(new, "n")
575 577 self.dirstate.forget(remove)
576 578
577 579 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
578 580 return n
579 581
580 582 def walk(self, node=None, files=[], match=util.always, badmatch=None):
581 583 if node:
582 584 fdict = dict.fromkeys(files)
583 585 for fn in self.manifest.read(self.changelog.read(node)[0]):
584 586 fdict.pop(fn, None)
585 587 if match(fn):
586 588 yield 'm', fn
587 589 for fn in fdict:
588 590 if badmatch and badmatch(fn):
589 591 if match(fn):
590 592 yield 'b', fn
591 593 else:
592 594 self.ui.warn(_('%s: No such file in rev %s\n') % (
593 595 util.pathto(self.getcwd(), fn), short(node)))
594 596 else:
595 597 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
596 598 yield src, fn
597 599
598 600 def changes(self, node1=None, node2=None, files=[], match=util.always,
599 601 wlock=None, show_ignored=None):
600 602 """return changes between two nodes or node and working directory
601 603
602 604 If node1 is None, use the first dirstate parent instead.
603 605 If node2 is None, compare node1 with working directory.
604 606 """
605 607
606 608 def fcmp(fn, mf):
607 609 t1 = self.wread(fn)
608 610 t2 = self.file(fn).read(mf.get(fn, nullid))
609 611 return cmp(t1, t2)
610 612
611 613 def mfmatches(node):
612 614 change = self.changelog.read(node)
613 615 mf = dict(self.manifest.read(change[0]))
614 616 for fn in mf.keys():
615 617 if not match(fn):
616 618 del mf[fn]
617 619 return mf
618 620
619 621 if node1:
620 622 # read the manifest from node1 before the manifest from node2,
621 623 # so that we'll hit the manifest cache if we're going through
622 624 # all the revisions in parent->child order.
623 625 mf1 = mfmatches(node1)
624 626
625 627 # are we comparing the working directory?
626 628 if not node2:
627 629 if not wlock:
628 630 try:
629 631 wlock = self.wlock(wait=0)
630 632 except lock.LockException:
631 633 wlock = None
632 634 lookup, modified, added, removed, deleted, unknown, ignored = (
633 635 self.dirstate.changes(files, match, show_ignored))
634 636
635 637 # are we comparing working dir against its parent?
636 638 if not node1:
637 639 if lookup:
638 640 # do a full compare of any files that might have changed
639 641 mf2 = mfmatches(self.dirstate.parents()[0])
640 642 for f in lookup:
641 643 if fcmp(f, mf2):
642 644 modified.append(f)
643 645 elif wlock is not None:
644 646 self.dirstate.update([f], "n")
645 647 else:
646 648 # we are comparing working dir against non-parent
647 649 # generate a pseudo-manifest for the working dir
648 650 mf2 = mfmatches(self.dirstate.parents()[0])
649 651 for f in lookup + modified + added:
650 652 mf2[f] = ""
651 653 for f in removed:
652 654 if f in mf2:
653 655 del mf2[f]
654 656 else:
655 657 # we are comparing two revisions
656 658 deleted, unknown, ignored = [], [], []
657 659 mf2 = mfmatches(node2)
658 660
659 661 if node1:
660 662 # flush lists from dirstate before comparing manifests
661 663 modified, added = [], []
662 664
663 665 for fn in mf2:
664 666 if mf1.has_key(fn):
665 667 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
666 668 modified.append(fn)
667 669 del mf1[fn]
668 670 else:
669 671 added.append(fn)
670 672
671 673 removed = mf1.keys()
672 674
673 675 # sort and return results:
674 676 for l in modified, added, removed, deleted, unknown, ignored:
675 677 l.sort()
676 678 if show_ignored is None:
677 679 return (modified, added, removed, deleted, unknown)
678 680 else:
679 681 return (modified, added, removed, deleted, unknown, ignored)
680 682
681 683 def add(self, list, wlock=None):
682 684 if not wlock:
683 685 wlock = self.wlock()
684 686 for f in list:
685 687 p = self.wjoin(f)
686 688 if not os.path.exists(p):
687 689 self.ui.warn(_("%s does not exist!\n") % f)
688 690 elif not os.path.isfile(p):
689 691 self.ui.warn(_("%s not added: only files supported currently\n")
690 692 % f)
691 693 elif self.dirstate.state(f) in 'an':
692 694 self.ui.warn(_("%s already tracked!\n") % f)
693 695 else:
694 696 self.dirstate.update([f], "a")
695 697
696 698 def forget(self, list, wlock=None):
697 699 if not wlock:
698 700 wlock = self.wlock()
699 701 for f in list:
700 702 if self.dirstate.state(f) not in 'ai':
701 703 self.ui.warn(_("%s not added!\n") % f)
702 704 else:
703 705 self.dirstate.forget([f])
704 706
705 707 def remove(self, list, unlink=False, wlock=None):
706 708 if unlink:
707 709 for f in list:
708 710 try:
709 711 util.unlink(self.wjoin(f))
710 712 except OSError, inst:
711 713 if inst.errno != errno.ENOENT:
712 714 raise
713 715 if not wlock:
714 716 wlock = self.wlock()
715 717 for f in list:
716 718 p = self.wjoin(f)
717 719 if os.path.exists(p):
718 720 self.ui.warn(_("%s still exists!\n") % f)
719 721 elif self.dirstate.state(f) == 'a':
720 722 self.dirstate.forget([f])
721 723 elif f not in self.dirstate:
722 724 self.ui.warn(_("%s not tracked!\n") % f)
723 725 else:
724 726 self.dirstate.update([f], "r")
725 727
726 728 def undelete(self, list, wlock=None):
727 729 p = self.dirstate.parents()[0]
728 730 mn = self.changelog.read(p)[0]
729 731 mf = self.manifest.readflags(mn)
730 732 m = self.manifest.read(mn)
731 733 if not wlock:
732 734 wlock = self.wlock()
733 735 for f in list:
734 736 if self.dirstate.state(f) not in "r":
735 737 self.ui.warn(_("%s not removed!\n") % f)
736 738 else:
737 739 t = self.file(f).read(m[f])
738 740 self.wwrite(f, t)
739 741 util.set_exec(self.wjoin(f), mf[f])
740 742 self.dirstate.update([f], "n")
741 743
742 744 def copy(self, source, dest, wlock=None):
743 745 p = self.wjoin(dest)
744 746 if not os.path.exists(p):
745 747 self.ui.warn(_("%s does not exist!\n") % dest)
746 748 elif not os.path.isfile(p):
747 749 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
748 750 else:
749 751 if not wlock:
750 752 wlock = self.wlock()
751 753 if self.dirstate.state(dest) == '?':
752 754 self.dirstate.update([dest], "a")
753 755 self.dirstate.copy(source, dest)
754 756
755 757 def heads(self, start=None):
756 758 heads = self.changelog.heads(start)
757 759 # sort the output in rev descending order
758 760 heads = [(-self.changelog.rev(h), h) for h in heads]
759 761 heads.sort()
760 762 return [n for (r, n) in heads]
761 763
762 764 # branchlookup returns a dict giving a list of branches for
763 765 # each head. A branch is defined as the tag of a node or
764 766 # the branch of the node's parents. If a node has multiple
765 767 # branch tags, tags are eliminated if they are visible from other
766 768 # branch tags.
767 769 #
768 770 # So, for this graph: a->b->c->d->e
769 771 # \ /
770 772 # aa -----/
771 773 # a has tag 2.6.12
772 774 # d has tag 2.6.13
773 775 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
774 776 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
775 777 # from the list.
776 778 #
777 779 # It is possible that more than one head will have the same branch tag.
778 780 # callers need to check the result for multiple heads under the same
779 781 # branch tag if that is a problem for them (ie checkout of a specific
780 782 # branch).
781 783 #
782 784 # passing in a specific branch will limit the depth of the search
783 785 # through the parents. It won't limit the branches returned in the
784 786 # result though.
785 787 def branchlookup(self, heads=None, branch=None):
786 788 if not heads:
787 789 heads = self.heads()
788 790 headt = [ h for h in heads ]
789 791 chlog = self.changelog
790 792 branches = {}
791 793 merges = []
792 794 seenmerge = {}
793 795
794 796 # traverse the tree once for each head, recording in the branches
795 797 # dict which tags are visible from this head. The branches
796 798 # dict also records which tags are visible from each tag
797 799 # while we traverse.
798 800 while headt or merges:
799 801 if merges:
800 802 n, found = merges.pop()
801 803 visit = [n]
802 804 else:
803 805 h = headt.pop()
804 806 visit = [h]
805 807 found = [h]
806 808 seen = {}
807 809 while visit:
808 810 n = visit.pop()
809 811 if n in seen:
810 812 continue
811 813 pp = chlog.parents(n)
812 814 tags = self.nodetags(n)
813 815 if tags:
814 816 for x in tags:
815 817 if x == 'tip':
816 818 continue
817 819 for f in found:
818 820 branches.setdefault(f, {})[n] = 1
819 821 branches.setdefault(n, {})[n] = 1
820 822 break
821 823 if n not in found:
822 824 found.append(n)
823 825 if branch in tags:
824 826 continue
825 827 seen[n] = 1
826 828 if pp[1] != nullid and n not in seenmerge:
827 829 merges.append((pp[1], [x for x in found]))
828 830 seenmerge[n] = 1
829 831 if pp[0] != nullid:
830 832 visit.append(pp[0])
831 833 # traverse the branches dict, eliminating branch tags from each
832 834 # head that are visible from another branch tag for that head.
833 835 out = {}
834 836 viscache = {}
835 837 for h in heads:
836 838 def visible(node):
837 839 if node in viscache:
838 840 return viscache[node]
839 841 ret = {}
840 842 visit = [node]
841 843 while visit:
842 844 x = visit.pop()
843 845 if x in viscache:
844 846 ret.update(viscache[x])
845 847 elif x not in ret:
846 848 ret[x] = 1
847 849 if x in branches:
848 850 visit[len(visit):] = branches[x].keys()
849 851 viscache[node] = ret
850 852 return ret
851 853 if h not in branches:
852 854 continue
853 855 # O(n^2), but somewhat limited. This only searches the
854 856 # tags visible from a specific head, not all the tags in the
855 857 # whole repo.
856 858 for b in branches[h]:
857 859 vis = False
858 860 for bb in branches[h].keys():
859 861 if b != bb:
860 862 if b in visible(bb):
861 863 vis = True
862 864 break
863 865 if not vis:
864 866 l = out.setdefault(h, [])
865 867 l[len(l):] = self.nodetags(b)
866 868 return out
867 869
868 870 def branches(self, nodes):
869 871 if not nodes:
870 872 nodes = [self.changelog.tip()]
871 873 b = []
872 874 for n in nodes:
873 875 t = n
874 876 while 1:
875 877 p = self.changelog.parents(n)
876 878 if p[1] != nullid or p[0] == nullid:
877 879 b.append((t, n, p[0], p[1]))
878 880 break
879 881 n = p[0]
880 882 return b
881 883
882 884 def between(self, pairs):
883 885 r = []
884 886
885 887 for top, bottom in pairs:
886 888 n, l, i = top, [], 0
887 889 f = 1
888 890
889 891 while n != bottom:
890 892 p = self.changelog.parents(n)[0]
891 893 if i == f:
892 894 l.append(n)
893 895 f = f * 2
894 896 n = p
895 897 i += 1
896 898
897 899 r.append(l)
898 900
899 901 return r
900 902
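The loop above walks from top toward bottom, recording the node after 1, 2, 4, 8, ... parent steps; findincoming then binary-searches those samples to locate the first unknown ancestor in a logarithmic number of round trips. The sampled offsets, reproduced standalone:

    def sample_offsets(depth):
        # offsets (in parent-steps from top) that between() records while
        # walking a linear branch `depth` steps long: 1, 2, 4, 8, ...
        l, i, f = [], 0, 1
        while i < depth:
            if i == f:
                l.append(i)
                f = f * 2
            i += 1
        return l

    # sample_offsets(20) == [1, 2, 4, 8, 16]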
901 903 def findincoming(self, remote, base=None, heads=None, force=False):
902 904 """Return list of roots of the subsets of missing nodes from remote
903 905
904 906 If base dict is specified, assume that these nodes and their parents
905 907 exist on the remote side and that no child of a node of base exists
906 908 in both remote and self.
907 909 Furthermore base will be updated to include the nodes that exist
908 910 in both self and remote but have no child in both self and remote.
909 911 If a list of heads is specified, return only nodes which are heads
910 912 or ancestors of these heads.
911 913
912 914 All the ancestors of base are in self and in remote.
913 915 All the descendants of the list returned are missing in self.
914 916 (and so we know that the rest of the nodes are missing in remote, see
915 917 outgoing)
916 918 """
917 919 m = self.changelog.nodemap
918 920 search = []
919 921 fetch = {}
920 922 seen = {}
921 923 seenbranch = {}
922 924 if base == None:
923 925 base = {}
924 926
925 927 if not heads:
926 928 heads = remote.heads()
927 929
928 930 if self.changelog.tip() == nullid:
929 931 base[nullid] = 1
930 932 if heads != [nullid]:
931 933 return [nullid]
932 934 return []
933 935
934 936 # assume we're closer to the tip than the root
935 937 # and start by examining the heads
936 938 self.ui.status(_("searching for changes\n"))
937 939
938 940 unknown = []
939 941 for h in heads:
940 942 if h not in m:
941 943 unknown.append(h)
942 944 else:
943 945 base[h] = 1
944 946
945 947 if not unknown:
946 948 return []
947 949
948 950 req = dict.fromkeys(unknown)
949 951 reqcnt = 0
950 952
951 953 # search through remote branches
952 954 # a 'branch' here is a linear segment of history, with four parts:
953 955 # head, root, first parent, second parent
954 956 # (a branch always has two parents (or none) by definition)
955 957 unknown = remote.branches(unknown)
956 958 while unknown:
957 959 r = []
958 960 while unknown:
959 961 n = unknown.pop(0)
960 962 if n[0] in seen:
961 963 continue
962 964
963 965 self.ui.debug(_("examining %s:%s\n")
964 966 % (short(n[0]), short(n[1])))
965 967 if n[0] == nullid: # found the end of the branch
966 968 pass
967 969 elif n in seenbranch:
968 970 self.ui.debug(_("branch already found\n"))
969 971 continue
970 972 elif n[1] and n[1] in m: # do we know the base?
971 973 self.ui.debug(_("found incomplete branch %s:%s\n")
972 974 % (short(n[0]), short(n[1])))
973 975 search.append(n) # schedule branch range for scanning
974 976 seenbranch[n] = 1
975 977 else:
976 978 if n[1] not in seen and n[1] not in fetch:
977 979 if n[2] in m and n[3] in m:
978 980 self.ui.debug(_("found new changeset %s\n") %
979 981 short(n[1]))
980 982 fetch[n[1]] = 1 # earliest unknown
981 983 for p in n[2:4]:
982 984 if p in m:
983 985 base[p] = 1 # latest known
984 986
985 987 for p in n[2:4]:
986 988 if p not in req and p not in m:
987 989 r.append(p)
988 990 req[p] = 1
989 991 seen[n[0]] = 1
990 992
991 993 if r:
992 994 reqcnt += 1
993 995 self.ui.debug(_("request %d: %s\n") %
994 996 (reqcnt, " ".join(map(short, r))))
995 997 for p in range(0, len(r), 10):
996 998 for b in remote.branches(r[p:p+10]):
997 999 self.ui.debug(_("received %s:%s\n") %
998 1000 (short(b[0]), short(b[1])))
999 1001 unknown.append(b)
1000 1002
1001 1003 # do binary search on the branches we found
1002 1004 while search:
1003 1005 n = search.pop(0)
1004 1006 reqcnt += 1
1005 1007 l = remote.between([(n[0], n[1])])[0]
1006 1008 l.append(n[1])
1007 1009 p = n[0]
1008 1010 f = 1
1009 1011 for i in l:
1010 1012 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1011 1013 if i in m:
1012 1014 if f <= 2:
1013 1015 self.ui.debug(_("found new branch changeset %s\n") %
1014 1016 short(p))
1015 1017 fetch[p] = 1
1016 1018 base[i] = 1
1017 1019 else:
1018 1020 self.ui.debug(_("narrowed branch search to %s:%s\n")
1019 1021 % (short(p), short(i)))
1020 1022 search.append((p, i))
1021 1023 break
1022 1024 p, f = i, f * 2
1023 1025
1024 1026 # sanity check our fetch list
1025 1027 for f in fetch.keys():
1026 1028 if f in m:
1027 1029 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1028 1030
1029 1031 if base.keys() == [nullid]:
1030 1032 if force:
1031 1033 self.ui.warn(_("warning: repository is unrelated\n"))
1032 1034 else:
1033 1035 raise util.Abort(_("repository is unrelated"))
1034 1036
1035 1037 self.ui.note(_("found new changesets starting at ") +
1036 1038 " ".join([short(f) for f in fetch]) + "\n")
1037 1039
1038 1040 self.ui.debug(_("%d total queries\n") % reqcnt)
1039 1041
1040 1042 return fetch.keys()
1041 1043
1042 1044 def findoutgoing(self, remote, base=None, heads=None, force=False):
1043 1045 """Return list of nodes that are roots of subsets not in remote
1044 1046
1045 1047 If base dict is specified, assume that these nodes and their parents
1046 1048 exist on the remote side.
1047 1049 If a list of heads is specified, return only nodes which are heads
1048 1050 or ancestors of these heads, and return a second element which
1049 1051 contains all remote heads which get new children.
1050 1052 """
1051 1053 if base == None:
1052 1054 base = {}
1053 1055 self.findincoming(remote, base, heads, force=force)
1054 1056
1055 1057 self.ui.debug(_("common changesets up to ")
1056 1058 + " ".join(map(short, base.keys())) + "\n")
1057 1059
1058 1060 remain = dict.fromkeys(self.changelog.nodemap)
1059 1061
1060 1062 # prune everything remote has from the tree
1061 1063 del remain[nullid]
1062 1064 remove = base.keys()
1063 1065 while remove:
1064 1066 n = remove.pop(0)
1065 1067 if n in remain:
1066 1068 del remain[n]
1067 1069 for p in self.changelog.parents(n):
1068 1070 remove.append(p)
1069 1071
1070 1072 # find every node whose parents have been pruned
1071 1073 subset = []
1072 1074 # find every remote head that will get new children
1073 1075 updated_heads = {}
1074 1076 for n in remain:
1075 1077 p1, p2 = self.changelog.parents(n)
1076 1078 if p1 not in remain and p2 not in remain:
1077 1079 subset.append(n)
1078 1080 if heads:
1079 1081 if p1 in heads:
1080 1082 updated_heads[p1] = True
1081 1083 if p2 in heads:
1082 1084 updated_heads[p2] = True
1083 1085
1084 1086 # this is the set of all roots we have to push
1085 1087 if heads:
1086 1088 return subset, updated_heads.keys()
1087 1089 else:
1088 1090 return subset
1089 1091
1090 1092 def pull(self, remote, heads=None, force=False):
1091 1093 l = self.lock()
1092 1094
1093 1095 fetch = self.findincoming(remote, force=force)
1094 1096 if fetch == [nullid]:
1095 1097 self.ui.status(_("requesting all changes\n"))
1096 1098
1097 1099 if not fetch:
1098 1100 self.ui.status(_("no changes found\n"))
1099 1101 return 0
1100 1102
1101 1103 if heads is None:
1102 1104 cg = remote.changegroup(fetch, 'pull')
1103 1105 else:
1104 1106 cg = remote.changegroupsubset(fetch, heads, 'pull')
1105 1107 return self.addchangegroup(cg, 'pull')
1106 1108
1107 1109 def push(self, remote, force=False, revs=None):
1108 lock = remote.lock()
1110 # there are two ways to push to remote repo:
1111 #
1112 # addchangegroup assumes local user can lock remote
1113 # repo (local filesystem, old ssh servers).
1114 #
1115 # unbundle assumes local user cannot lock remote repo (new ssh
1116 # servers, http servers).
1109 1117
1118 if 'unbundle' in remote.capabilities:
1119 self.push_unbundle(remote, force, revs)
1120 else:
1121 self.push_addchangegroup(remote, force, revs)
1122
1123 def prepush(self, remote, force, revs):
1110 1124 base = {}
1111 1125 remote_heads = remote.heads()
1112 1126 inc = self.findincoming(remote, base, remote_heads, force=force)
1113 1127 if not force and inc:
1114 1128 self.ui.warn(_("abort: unsynced remote changes!\n"))
1115 1129 self.ui.status(_("(did you forget to sync?"
1116 1130 " use push -f to force)\n"))
1117 return 1
1131 return None, 1
1118 1132
1119 1133 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1120 1134 if revs is not None:
1121 1135 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1122 1136 else:
1123 1137 bases, heads = update, self.changelog.heads()
1124 1138
1125 1139 if not bases:
1126 1140 self.ui.status(_("no changes found\n"))
1127 return 1
1141 return None, 1
1128 1142 elif not force:
1129 1143 # FIXME we don't properly detect creation of new heads
1130 1144 # in the push -r case, assume the user knows what he's doing
1131 1145 if not revs and len(remote_heads) < len(heads) \
1132 1146 and remote_heads != [nullid]:
1133 1147 self.ui.warn(_("abort: push creates new remote branches!\n"))
1134 1148 self.ui.status(_("(did you forget to merge?"
1135 1149 " use push -f to force)\n"))
1136 return 1
1150 return None, 1
1137 1151
1138 1152 if revs is None:
1139 1153 cg = self.changegroup(update, 'push')
1140 1154 else:
1141 1155 cg = self.changegroupsubset(update, revs, 'push')
1142 return remote.addchangegroup(cg, 'push')
1156 return cg, remote_heads
1157
1158 def push_addchangegroup(self, remote, force, revs):
1159 lock = remote.lock()
1160
1161 ret = self.prepush(remote, force, revs)
1162 if ret[0] is not None:
1163 cg, remote_heads = ret
1164 return remote.addchangegroup(cg, 'push')
1165 return ret[1]
1166
1167 def push_unbundle(self, remote, force, revs):
1168 # local repo finds heads on server, finds out what revs it
1169 # must push. once revs transferred, if server finds it has
1170 # different heads (someone else won commit/push race), server
1171 # aborts.
1172
1173 ret = self.prepush(remote, force, revs)
1174 if ret[0] is not None:
1175 cg, remote_heads = ret
1176 if force: remote_heads = ['force']
1177 return remote.unbundle(cg, remote_heads, 'push')
1178 return ret[1]
1143 1179
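The payoff of the unbundle path is that the server, not the client, takes the lock, and only for as long as applying the bundle takes. A sketch of the server-side check described in the comment above (hypothetical server code, not part of this changeset):

    def serve_unbundle(repo, cg, their_heads, source):
        l = repo.lock()                 # server's own short-lived lock
        try:
            # if someone else pushed since the client looked, abort so the
            # client pulls and merges instead of silently adding heads
            if their_heads != ['force'] and their_heads != repo.heads():
                raise util.Abort(_('unsynced changes'))
            return repo.addchangegroup(cg, source)
        finally:
            del l                       # dropping the lock object releases it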
1144 1180 def changegroupsubset(self, bases, heads, source):
1145 1181 """This function generates a changegroup consisting of all the nodes
1146 1182 that are descendants of any of the bases, and ancestors of any of
1147 1183 the heads.
1148 1184
1149 1185 It is fairly complex as determining which filenodes and which
1150 1186 manifest nodes need to be included for the changeset to be complete
1151 1187 is non-trivial.
1152 1188
1153 1189 Another wrinkle is doing the reverse, figuring out which changeset in
1154 1190 the changegroup a particular filenode or manifestnode belongs to."""
1155 1191
1156 1192 self.hook('preoutgoing', throw=True, source=source)
1157 1193
1158 1194 # Set up some initial variables
1159 1195 # Make it easy to refer to self.changelog
1160 1196 cl = self.changelog
1161 1197 # msng is short for missing - compute the list of changesets in this
1162 1198 # changegroup.
1163 1199 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1164 1200 # Some bases may turn out to be superfluous, and some heads may be
1165 1201 # too. nodesbetween will return the minimal set of bases and heads
1166 1202 # necessary to re-create the changegroup.
1167 1203
1168 1204 # Known heads are the list of heads that it is assumed the recipient
1169 1205 # of this changegroup will know about.
1170 1206 knownheads = {}
1171 1207 # We assume that all parents of bases are known heads.
1172 1208 for n in bases:
1173 1209 for p in cl.parents(n):
1174 1210 if p != nullid:
1175 1211 knownheads[p] = 1
1176 1212 knownheads = knownheads.keys()
1177 1213 if knownheads:
1178 1214 # Now that we know what heads are known, we can compute which
1179 1215 # changesets are known. The recipient must know about all
1180 1216 # changesets required to reach the known heads from the null
1181 1217 # changeset.
1182 1218 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1183 1219 junk = None
1184 1220 # Transform the list into an ersatz set.
1185 1221 has_cl_set = dict.fromkeys(has_cl_set)
1186 1222 else:
1187 1223 # If there were no known heads, the recipient cannot be assumed to
1188 1224 # know about any changesets.
1189 1225 has_cl_set = {}
1190 1226
1191 1227 # Make it easy to refer to self.manifest
1192 1228 mnfst = self.manifest
1193 1229 # We don't know which manifests are missing yet
1194 1230 msng_mnfst_set = {}
1195 1231 # Nor do we know which filenodes are missing.
1196 1232 msng_filenode_set = {}
1197 1233
1198 1234 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1199 1235 junk = None
1200 1236
1201 1237 # A changeset always belongs to itself, so the changenode lookup
1202 1238 # function for a changenode is identity.
1203 1239 def identity(x):
1204 1240 return x
1205 1241
1206 1242 # A function generating function. Sets up an environment for the
1207 1243 # inner function.
1208 1244 def cmp_by_rev_func(revlog):
1209 1245 # Compare two nodes by their revision number in the environment's
1210 1246 # revision history. Since the revision number both represents the
1211 1247 # most efficient order to read the nodes in, and represents a
1212 1248 # topological sorting of the nodes, this function is often useful.
1213 1249 def cmp_by_rev(a, b):
1214 1250 return cmp(revlog.rev(a), revlog.rev(b))
1215 1251 return cmp_by_rev
1216 1252
1217 1253 # If we determine that a particular file or manifest node must be a
1218 1254 # node that the recipient of the changegroup will already have, we can
1219 1255 # also assume the recipient will have all the parents. This function
1220 1256 # prunes them from the set of missing nodes.
1221 1257 def prune_parents(revlog, hasset, msngset):
1222 1258 haslst = hasset.keys()
1223 1259 haslst.sort(cmp_by_rev_func(revlog))
1224 1260 for node in haslst:
1225 1261 parentlst = [p for p in revlog.parents(node) if p != nullid]
1226 1262 while parentlst:
1227 1263 n = parentlst.pop()
1228 1264 if n not in hasset:
1229 1265 hasset[n] = 1
1230 1266 p = [p for p in revlog.parents(n) if p != nullid]
1231 1267 parentlst.extend(p)
1232 1268 for n in hasset:
1233 1269 msngset.pop(n, None)
1234 1270
1235 1271 # This is a function generating function used to set up an environment
1236 1272 # for the inner function to execute in.
1237 1273 def manifest_and_file_collector(changedfileset):
1238 1274 # This is an information gathering function that gathers
1239 1275 # information from each changeset node that goes out as part of
1240 1276 # the changegroup. The information gathered is a list of which
1241 1277 # manifest nodes are potentially required (the recipient may
1242 1278 # already have them) and total list of all files which were
1243 1279 # changed in any changeset in the changegroup.
1244 1280 #
1245 1281 # We also remember the first changenode we saw any manifest
1246 1282 # referenced by, so we can later determine which changenode 'owns'
1247 1283 # the manifest.
1248 1284 def collect_manifests_and_files(clnode):
1249 1285 c = cl.read(clnode)
1250 1286 for f in c[3]:
1251 1287 # This is to make sure we only have one instance of each
1252 1288 # filename string for each filename.
1253 1289 changedfileset.setdefault(f, f)
1254 1290 msng_mnfst_set.setdefault(c[0], clnode)
1255 1291 return collect_manifests_and_files
1256 1292
1257 1293 # Figure out which manifest nodes (of the ones we think might be part
1258 1294 # of the changegroup) the recipient must know about and remove them
1259 1295 # from the changegroup.
1260 1296 def prune_manifests():
1261 1297 has_mnfst_set = {}
1262 1298 for n in msng_mnfst_set:
1263 1299 # If a 'missing' manifest thinks it belongs to a changenode
1264 1300 # the recipient is assumed to have, obviously the recipient
1265 1301 # must have that manifest.
1266 1302 linknode = cl.node(mnfst.linkrev(n))
1267 1303 if linknode in has_cl_set:
1268 1304 has_mnfst_set[n] = 1
1269 1305 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1270 1306
1271 1307 # Use the information collected in collect_manifests_and_files to say
1272 1308 # which changenode any manifestnode belongs to.
1273 1309 def lookup_manifest_link(mnfstnode):
1274 1310 return msng_mnfst_set[mnfstnode]
1275 1311
1276 1312 # A function generating function that sets up the initial environment
1277 1313 # for the inner function.
1278 1314 def filenode_collector(changedfiles):
1279 1315 next_rev = [0]
1280 1316 # This gathers information from each manifestnode included in the
1281 1317 # changegroup about which filenodes the manifest node references
1282 1318 # so we can include those in the changegroup too.
1283 1319 #
1284 1320 # It also remembers which changenode each filenode belongs to. It
1285 1321 # does this by assuming that a filenode belongs to the changenode that
1286 1322 # the first manifest referencing it belongs to.
1287 1323 def collect_msng_filenodes(mnfstnode):
1288 1324 r = mnfst.rev(mnfstnode)
1289 1325 if r == next_rev[0]:
1290 1326 # If the last rev we looked at was the one just previous,
1291 1327 # we only need to see a diff.
1292 1328 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1293 1329 # For each line in the delta
1294 1330 for dline in delta.splitlines():
1295 1331 # get the filename and filenode for that line
1296 1332 f, fnode = dline.split('\0')
1297 1333 fnode = bin(fnode[:40])
1298 1334 f = changedfiles.get(f, None)
1299 1335 # And if the file is in the list of files we care
1300 1336 # about.
1301 1337 if f is not None:
1302 1338 # Get the changenode this manifest belongs to
1303 1339 clnode = msng_mnfst_set[mnfstnode]
1304 1340 # Create the set of filenodes for the file if
1305 1341 # there isn't one already.
1306 1342 ndset = msng_filenode_set.setdefault(f, {})
1307 1343 # And set the filenode's changelog node to the
1308 1344 # manifest's if it hasn't been set already.
1309 1345 ndset.setdefault(fnode, clnode)
1310 1346 else:
1311 1347 # Otherwise we need a full manifest.
1312 1348 m = mnfst.read(mnfstnode)
1313 1349 # For every file we care about.
1314 1350 for f in changedfiles:
1315 1351 fnode = m.get(f, None)
1316 1352 # If it's in the manifest
1317 1353 if fnode is not None:
1318 1354 # See comments above.
1319 1355 clnode = msng_mnfst_set[mnfstnode]
1320 1356 ndset = msng_filenode_set.setdefault(f, {})
1321 1357 ndset.setdefault(fnode, clnode)
1322 1358 # Remember the revision we hope to see next.
1323 1359 next_rev[0] = r + 1
1324 1360 return collect_msng_filenodes
1325 1361
1326 1362 # We have a list of filenodes we think we need for a file, let's remove
1327 1363 # all those we know the recipient must have.
1328 1364 def prune_filenodes(f, filerevlog):
1329 1365 msngset = msng_filenode_set[f]
1330 1366 hasset = {}
1331 1367 # If a 'missing' filenode thinks it belongs to a changenode we
1332 1368 # assume the recipient must have, then the recipient must have
1333 1369 # that filenode.
1334 1370 for n in msngset:
1335 1371 clnode = cl.node(filerevlog.linkrev(n))
1336 1372 if clnode in has_cl_set:
1337 1373 hasset[n] = 1
1338 1374 prune_parents(filerevlog, hasset, msngset)
1339 1375
1340 1376 # A function generating function that sets up a context for the
1341 1377 # inner function.
1342 1378 def lookup_filenode_link_func(fname):
1343 1379 msngset = msng_filenode_set[fname]
1344 1380 # Lookup the changenode the filenode belongs to.
1345 1381 def lookup_filenode_link(fnode):
1346 1382 return msngset[fnode]
1347 1383 return lookup_filenode_link
1348 1384
1349 1385 # Now that we have all these utility functions to help out and
1350 1386 # logically divide up the task, generate the group.
1351 1387 def gengroup():
1352 1388 # The set of changed files starts empty.
1353 1389 changedfiles = {}
1354 1390 # Create a changenode group generator that will call our functions
1355 1391 # back to lookup the owning changenode and collect information.
1356 1392 group = cl.group(msng_cl_lst, identity,
1357 1393 manifest_and_file_collector(changedfiles))
1358 1394 for chnk in group:
1359 1395 yield chnk
1360 1396
1361 1397 # The list of manifests has been collected by the generator
1362 1398 # calling our functions back.
1363 1399 prune_manifests()
1364 1400 msng_mnfst_lst = msng_mnfst_set.keys()
1365 1401 # Sort the manifestnodes by revision number.
1366 1402 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1367 1403 # Create a generator for the manifestnodes that calls our lookup
1368 1404 # and data collection functions back.
1369 1405 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1370 1406 filenode_collector(changedfiles))
1371 1407 for chnk in group:
1372 1408 yield chnk
1373 1409
1374 1410 # These are no longer needed, dereference and toss the memory for
1375 1411 # them.
1376 1412 msng_mnfst_lst = None
1377 1413 msng_mnfst_set.clear()
1378 1414
1379 1415 changedfiles = changedfiles.keys()
1380 1416 changedfiles.sort()
1381 1417 # Go through all our files in order sorted by name.
1382 1418 for fname in changedfiles:
1383 1419 filerevlog = self.file(fname)
1384 1420 # Toss out the filenodes that the recipient isn't really
1385 1421 # missing.
1386 1422 if msng_filenode_set.has_key(fname):
1387 1423 prune_filenodes(fname, filerevlog)
1388 1424 msng_filenode_lst = msng_filenode_set[fname].keys()
1389 1425 else:
1390 1426 msng_filenode_lst = []
1391 1427 # If any filenodes are left, generate the group for them,
1392 1428 # otherwise don't bother.
1393 1429 if len(msng_filenode_lst) > 0:
1394 1430 yield changegroup.genchunk(fname)
1395 1431 # Sort the filenodes by their revision number.
1396 1432 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1397 1433 # Create a group generator and only pass in a changenode
1398 1434 # lookup function as we need to collect no information
1399 1435 # from filenodes.
1400 1436 group = filerevlog.group(msng_filenode_lst,
1401 1437 lookup_filenode_link_func(fname))
1402 1438 for chnk in group:
1403 1439 yield chnk
1404 1440 if msng_filenode_set.has_key(fname):
1405 1441 # Don't need this anymore, toss it to free memory.
1406 1442 del msng_filenode_set[fname]
1407 1443 # Signal that no more groups are left.
1408 1444 yield changegroup.closechunk()
1409 1445
1410 1446 if msng_cl_lst:
1411 1447 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1412 1448
1413 1449 return util.chunkbuffer(gengroup())
1414 1450
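Every chunk yielded from gengroup uses the changegroup framing. A hedged sketch, assuming the classic format where a 4-byte big-endian length prefix counts itself and a zero-length chunk closes a group:

import struct

def genchunk(data):
    # 4-byte big-endian length (it includes these 4 header bytes)
    return struct.pack(">l", len(data) + 4) + data

def closechunk():
    # an empty chunk terminates a group
    return struct.pack(">l", 0)
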
1415 1451 def changegroup(self, basenodes, source):
1416 1452 """Generate a changegroup of all nodes that we have that a recipient
1417 1453 doesn't.
1418 1454
1419 1455 This is much easier than the previous function as we can assume that
1420 1456 the recipient has any changenode we aren't sending them."""
1421 1457
1422 1458 self.hook('preoutgoing', throw=True, source=source)
1423 1459
1424 1460 cl = self.changelog
1425 1461 nodes = cl.nodesbetween(basenodes, None)[0]
1426 1462 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1427 1463
1428 1464 def identity(x):
1429 1465 return x
1430 1466
1431 1467 def gennodelst(revlog):
1432 1468 for r in xrange(0, revlog.count()):
1433 1469 n = revlog.node(r)
1434 1470 if revlog.linkrev(n) in revset:
1435 1471 yield n
1436 1472
1437 1473 def changed_file_collector(changedfileset):
1438 1474 def collect_changed_files(clnode):
1439 1475 c = cl.read(clnode)
1440 1476 for fname in c[3]:
1441 1477 changedfileset[fname] = 1
1442 1478 return collect_changed_files
1443 1479
1444 1480 def lookuprevlink_func(revlog):
1445 1481 def lookuprevlink(n):
1446 1482 return cl.node(revlog.linkrev(n))
1447 1483 return lookuprevlink
1448 1484
1449 1485 def gengroup():
1450 1486 # construct a list of all changed files
1451 1487 changedfiles = {}
1452 1488
1453 1489 for chnk in cl.group(nodes, identity,
1454 1490 changed_file_collector(changedfiles)):
1455 1491 yield chnk
1456 1492 changedfiles = changedfiles.keys()
1457 1493 changedfiles.sort()
1458 1494
1459 1495 mnfst = self.manifest
1460 1496 nodeiter = gennodelst(mnfst)
1461 1497 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1462 1498 yield chnk
1463 1499
1464 1500 for fname in changedfiles:
1465 1501 filerevlog = self.file(fname)
1466 1502 nodeiter = gennodelst(filerevlog)
1467 1503 nodeiter = list(nodeiter)
1468 1504 if nodeiter:
1469 1505 yield changegroup.genchunk(fname)
1470 1506 lookup = lookuprevlink_func(filerevlog)
1471 1507 for chnk in filerevlog.group(nodeiter, lookup):
1472 1508 yield chnk
1473 1509
1474 1510 yield changegroup.closechunk()
1475 1511
1476 1512 if nodes:
1477 1513 self.hook('outgoing', node=hex(nodes[0]), source=source)
1478 1514
1479 1515 return util.chunkbuffer(gengroup())
1480 1516
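A hedged sketch of how the two sides of this API meet during a local push or pull; src, dst and basenodes are placeholders:

# src generates everything dst lacks beneath the given roots; dst applies it
cg = src.changegroup(basenodes, 'push')   # a util.chunkbuffer of chunks
ret = dst.addchangegroup(cg, 'push')      # heads modified or added + 1
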
1481 1517 def addchangegroup(self, source, srctype):
1482 1518 """add changegroup to repo.
1483 1519 returns number of heads modified or added + 1."""
1484 1520
1485 1521 def csmap(x):
1486 1522 self.ui.debug(_("add changeset %s\n") % short(x))
1487 1523 return cl.count()
1488 1524
1489 1525 def revmap(x):
1490 1526 return cl.rev(x)
1491 1527
1492 1528 if not source:
1493 1529 return 0
1494 1530
1495 1531 self.hook('prechangegroup', throw=True, source=srctype)
1496 1532
1497 1533 changesets = files = revisions = 0
1498 1534
1499 1535 tr = self.transaction()
1500 1536
1501 1537 # write changelog data to temp files so concurrent readers will not see
1502 1538 # an inconsistent view
1503 1539 cl = None
1504 1540 try:
1505 1541 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1506 1542
1507 1543 oldheads = len(cl.heads())
1508 1544
1509 1545 # pull off the changeset group
1510 1546 self.ui.status(_("adding changesets\n"))
1511 1547 cor = cl.count() - 1
1512 1548 chunkiter = changegroup.chunkiter(source)
1513 1549 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1514 1550 raise util.Abort(_("received changelog group is empty"))
1515 1551 cnr = cl.count() - 1
1516 1552 changesets = cnr - cor
1517 1553
1518 1554 # pull off the manifest group
1519 1555 self.ui.status(_("adding manifests\n"))
1520 1556 chunkiter = changegroup.chunkiter(source)
1521 1557 # no need to check for empty manifest group here:
1522 1558 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1523 1559 # no new manifest will be created and the manifest group will
1524 1560 # be empty during the pull
1525 1561 self.manifest.addgroup(chunkiter, revmap, tr)
1526 1562
1527 1563 # process the files
1528 1564 self.ui.status(_("adding file changes\n"))
1529 1565 while 1:
1530 1566 f = changegroup.getchunk(source)
1531 1567 if not f:
1532 1568 break
1533 1569 self.ui.debug(_("adding %s revisions\n") % f)
1534 1570 fl = self.file(f)
1535 1571 o = fl.count()
1536 1572 chunkiter = changegroup.chunkiter(source)
1537 1573 if fl.addgroup(chunkiter, revmap, tr) is None:
1538 1574 raise util.Abort(_("received file revlog group is empty"))
1539 1575 revisions += fl.count() - o
1540 1576 files += 1
1541 1577
1542 1578 cl.writedata()
1543 1579 finally:
1544 1580 if cl:
1545 1581 cl.cleanup()
1546 1582
1547 1583 # make changelog see real files again
1548 1584 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1549 1585 self.changelog.checkinlinesize(tr)
1550 1586
1551 1587 newheads = len(self.changelog.heads())
1552 1588 heads = ""
1553 1589 if oldheads and newheads != oldheads:
1554 1590 heads = _(" (%+d heads)") % (newheads - oldheads)
1555 1591
1556 1592 self.ui.status(_("added %d changesets"
1557 1593 " with %d changes to %d files%s\n")
1558 1594 % (changesets, revisions, files, heads))
1559 1595
1560 1596 if changesets > 0:
1561 1597 self.hook('pretxnchangegroup', throw=True,
1562 1598 node=hex(self.changelog.node(cor+1)), source=srctype)
1563 1599
1564 1600 tr.close()
1565 1601
1566 1602 if changesets > 0:
1567 1603 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1568 1604 source=srctype)
1569 1605
1570 1606 for i in range(cor + 1, cnr + 1):
1571 1607 self.hook("incoming", node=hex(self.changelog.node(i)),
1572 1608 source=srctype)
1573 1609
1574 1610 return newheads - oldheads + 1
1575 1611
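The return value folds a status flag and the head delta together, so a caller has to unpack it; a small sketch (the zero case mirrors the empty-source check at the top of the method):

ret = repo.addchangegroup(fp, 'serve')
if ret == 0:
    pass                     # nothing was read (no source)
else:
    heads_delta = ret - 1    # newheads - oldheads, may be negative
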
1576 1612 def update(self, node, allow=False, force=False, choose=None,
1577 1613 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1578 1614 pl = self.dirstate.parents()
1579 1615 if not force and pl[1] != nullid:
1580 1616 raise util.Abort(_("outstanding uncommitted merges"))
1581 1617
1582 1618 err = False
1583 1619
1584 1620 p1, p2 = pl[0], node
1585 1621 pa = self.changelog.ancestor(p1, p2)
1586 1622 m1n = self.changelog.read(p1)[0]
1587 1623 m2n = self.changelog.read(p2)[0]
1588 1624 man = self.manifest.ancestor(m1n, m2n)
1589 1625 m1 = self.manifest.read(m1n)
1590 1626 mf1 = self.manifest.readflags(m1n)
1591 1627 m2 = self.manifest.read(m2n).copy()
1592 1628 mf2 = self.manifest.readflags(m2n)
1593 1629 ma = self.manifest.read(man)
1594 1630 mfa = self.manifest.readflags(man)
1595 1631
1596 1632 modified, added, removed, deleted, unknown = self.changes()
1597 1633
1598 1634 # is this a jump, or a merge? i.e. is there a linear path
1599 1635 # from p1 to p2?
1600 1636 linear_path = (pa == p1 or pa == p2)
1601 1637
1602 1638 if allow and linear_path:
1603 1639 raise util.Abort(_("there is nothing to merge, "
1604 1640 "just use 'hg update'"))
1605 1641 if allow and not forcemerge:
1606 1642 if modified or added or removed:
1607 1643 raise util.Abort(_("outstanding uncommitted changes"))
1608 1644
1609 1645 if not forcemerge and not force:
1610 1646 for f in unknown:
1611 1647 if f in m2:
1612 1648 t1 = self.wread(f)
1613 1649 t2 = self.file(f).read(m2[f])
1614 1650 if cmp(t1, t2) != 0:
1615 1651 raise util.Abort(_("'%s' already exists in the working"
1616 1652 " dir and differs from remote") % f)
1617 1653
1618 1654 # resolve the manifest to determine which files
1619 1655 # we care about merging
1620 1656 self.ui.note(_("resolving manifests\n"))
1621 1657 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1622 1658 (force, allow, moddirstate, linear_path))
1623 1659 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1624 1660 (short(man), short(m1n), short(m2n)))
1625 1661
1626 1662 merge = {}
1627 1663 get = {}
1628 1664 remove = []
1629 1665
1630 1666 # construct a working dir manifest
1631 1667 mw = m1.copy()
1632 1668 mfw = mf1.copy()
1633 1669 umap = dict.fromkeys(unknown)
1634 1670
1635 1671 for f in added + modified + unknown:
1636 1672 mw[f] = ""
1637 1673 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1638 1674
1639 1675 if moddirstate and not wlock:
1640 1676 wlock = self.wlock()
1641 1677
1642 1678 for f in deleted + removed:
1643 1679 if f in mw:
1644 1680 del mw[f]
1645 1681
1646 1682 # If we're jumping between revisions (as opposed to merging),
1647 1683 # and if neither the working directory nor the target rev has
1648 1684 # the file, then we need to remove it from the dirstate, to
1649 1685 # prevent the dirstate from listing the file when it is no
1650 1686 # longer in the manifest.
1651 1687 if moddirstate and linear_path and f not in m2:
1652 1688 self.dirstate.forget((f,))
1653 1689
1654 1690 # Compare manifests
1655 1691 for f, n in mw.iteritems():
1656 1692 if choose and not choose(f):
1657 1693 continue
1658 1694 if f in m2:
1659 1695 s = 0
1660 1696
1661 1697 # is the wfile new since m1, and match m2?
1662 1698 if f not in m1:
1663 1699 t1 = self.wread(f)
1664 1700 t2 = self.file(f).read(m2[f])
1665 1701 if cmp(t1, t2) == 0:
1666 1702 n = m2[f]
1667 1703 del t1, t2
1668 1704
1669 1705 # are files different?
1670 1706 if n != m2[f]:
1671 1707 a = ma.get(f, nullid)
1672 1708 # are both different from the ancestor?
1673 1709 if n != a and m2[f] != a:
1674 1710 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1675 1711 # merge executable bits
1676 1712 # "if we changed or they changed, change in merge"
1677 1713 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1678 1714 mode = ((a^b) | (a^c)) ^ a
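                        # a worked check of the rule above: with ancestor
                        # bit a=0, (b=1,c=0) and (b=0,c=1) both yield
                        # mode=1, so whichever side flipped the bit wins;
                        # when b == c the expression reduces to b.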
1679 1715 merge[f] = (m1.get(f, nullid), m2[f], mode)
1680 1716 s = 1
1681 1717 # are we clobbering?
1682 1718 # is remote's version newer?
1683 1719 # or are we going back in time?
1684 1720 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1685 1721 self.ui.debug(_(" remote %s is newer, get\n") % f)
1686 1722 get[f] = m2[f]
1687 1723 s = 1
1688 1724 elif f in umap or f in added:
1689 1725 # this unknown file is the same as the checkout
1690 1726 # we need to reset the dirstate if the file was added
1691 1727 get[f] = m2[f]
1692 1728
1693 1729 if not s and mfw[f] != mf2[f]:
1694 1730 if force:
1695 1731 self.ui.debug(_(" updating permissions for %s\n") % f)
1696 1732 util.set_exec(self.wjoin(f), mf2[f])
1697 1733 else:
1698 1734 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1699 1735 mode = ((a^b) | (a^c)) ^ a
1700 1736 if mode != b:
1701 1737 self.ui.debug(_(" updating permissions for %s\n")
1702 1738 % f)
1703 1739 util.set_exec(self.wjoin(f), mode)
1704 1740 del m2[f]
1705 1741 elif f in ma:
1706 1742 if n != ma[f]:
1707 1743 r = _("d")
1708 1744 if not force and (linear_path or allow):
1709 1745 r = self.ui.prompt(
1710 1746 (_(" local changed %s which remote deleted\n") % f) +
1711 1747 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1712 1748 if r == _("d"):
1713 1749 remove.append(f)
1714 1750 else:
1715 1751 self.ui.debug(_("other deleted %s\n") % f)
1716 1752 remove.append(f) # other deleted it
1717 1753 else:
1718 1754 # file is created on branch or in working directory
1719 1755 if force and f not in umap:
1720 1756 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1721 1757 remove.append(f)
1722 1758 elif n == m1.get(f, nullid): # same as parent
1723 1759 if p2 == pa: # going backwards?
1724 1760 self.ui.debug(_("remote deleted %s\n") % f)
1725 1761 remove.append(f)
1726 1762 else:
1727 1763 self.ui.debug(_("local modified %s, keeping\n") % f)
1728 1764 else:
1729 1765 self.ui.debug(_("working dir created %s, keeping\n") % f)
1730 1766
1731 1767 for f, n in m2.iteritems():
1732 1768 if choose and not choose(f):
1733 1769 continue
1734 1770 if f[0] == "/":
1735 1771 continue
1736 1772 if f in ma and n != ma[f]:
1737 1773 r = _("k")
1738 1774 if not force and (linear_path or allow):
1739 1775 r = self.ui.prompt(
1740 1776 (_("remote changed %s which local deleted\n") % f) +
1741 1777 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1742 1778 if r == _("k"):
1743 1779 get[f] = n
1744 1780 elif f not in ma:
1745 1781 self.ui.debug(_("remote created %s\n") % f)
1746 1782 get[f] = n
1747 1783 else:
1748 1784 if force or p2 == pa: # going backwards?
1749 1785 self.ui.debug(_("local deleted %s, recreating\n") % f)
1750 1786 get[f] = n
1751 1787 else:
1752 1788 self.ui.debug(_("local deleted %s\n") % f)
1753 1789
1754 1790 del mw, m1, m2, ma
1755 1791
1756 1792 if force:
1757 1793 for f in merge:
1758 1794 get[f] = merge[f][1]
1759 1795 merge = {}
1760 1796
1761 1797 if linear_path or force:
1762 1798 # we don't need to do any magic, just jump to the new rev
1763 1799 branch_merge = False
1764 1800 p1, p2 = p2, nullid
1765 1801 else:
1766 1802 if not allow:
1767 1803 self.ui.status(_("this update spans a branch"
1768 1804 " affecting the following files:\n"))
1769 1805 fl = merge.keys() + get.keys()
1770 1806 fl.sort()
1771 1807 for f in fl:
1772 1808 cf = ""
1773 1809 if f in merge:
1774 1810 cf = _(" (resolve)")
1775 1811 self.ui.status(" %s%s\n" % (f, cf))
1776 1812 self.ui.warn(_("aborting update spanning branches!\n"))
1777 1813 self.ui.status(_("(use 'hg merge' to merge across branches"
1778 1814 " or 'hg update -C' to lose changes)\n"))
1779 1815 return 1
1780 1816 branch_merge = True
1781 1817
1782 1818 xp1 = hex(p1)
1783 1819 xp2 = hex(p2)
1784 1820 if p2 == nullid: xxp2 = ''
1785 1821 else: xxp2 = xp2
1786 1822
1787 1823 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1788 1824
1789 1825 # get the files we don't need to change
1790 1826 files = get.keys()
1791 1827 files.sort()
1792 1828 for f in files:
1793 1829 if f[0] == "/":
1794 1830 continue
1795 1831 self.ui.note(_("getting %s\n") % f)
1796 1832 t = self.file(f).read(get[f])
1797 1833 self.wwrite(f, t)
1798 1834 util.set_exec(self.wjoin(f), mf2[f])
1799 1835 if moddirstate:
1800 1836 if branch_merge:
1801 1837 self.dirstate.update([f], 'n', st_mtime=-1)
1802 1838 else:
1803 1839 self.dirstate.update([f], 'n')
1804 1840
1805 1841 # merge the tricky bits
1806 1842 failedmerge = []
1807 1843 files = merge.keys()
1808 1844 files.sort()
1809 1845 for f in files:
1810 1846 self.ui.status(_("merging %s\n") % f)
1811 1847 my, other, flag = merge[f]
1812 1848 ret = self.merge3(f, my, other, xp1, xp2)
1813 1849 if ret:
1814 1850 err = True
1815 1851 failedmerge.append(f)
1816 1852 util.set_exec(self.wjoin(f), flag)
1817 1853 if moddirstate:
1818 1854 if branch_merge:
1819 1855 # We've done a branch merge, mark this file as merged
1820 1856 # so that we properly record the merge later
1821 1857 self.dirstate.update([f], 'm')
1822 1858 else:
1823 1859 # We've update-merged a locally modified file, so
1824 1860 # we set the dirstate to emulate a normal checkout
1825 1861 # of that file some time in the past. Thus our
1826 1862 # merge will appear as a normal local file
1827 1863 # modification.
1828 1864 f_len = len(self.file(f).read(other))
1829 1865 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1830 1866
1831 1867 remove.sort()
1832 1868 for f in remove:
1833 1869 self.ui.note(_("removing %s\n") % f)
1834 1870 util.audit_path(f)
1835 1871 try:
1836 1872 util.unlink(self.wjoin(f))
1837 1873 except OSError, inst:
1838 1874 if inst.errno != errno.ENOENT:
1839 1875 self.ui.warn(_("update failed to remove %s: %s!\n") %
1840 1876 (f, inst.strerror))
1841 1877 if moddirstate:
1842 1878 if branch_merge:
1843 1879 self.dirstate.update(remove, 'r')
1844 1880 else:
1845 1881 self.dirstate.forget(remove)
1846 1882
1847 1883 if moddirstate:
1848 1884 self.dirstate.setparents(p1, p2)
1849 1885
1850 1886 if show_stats:
1851 1887 stats = ((len(get), _("updated")),
1852 1888 (len(merge) - len(failedmerge), _("merged")),
1853 1889 (len(remove), _("removed")),
1854 1890 (len(failedmerge), _("unresolved")))
1855 1891 note = ", ".join([_("%d files %s") % s for s in stats])
1856 1892 self.ui.status("%s\n" % note)
1857 1893 if moddirstate:
1858 1894 if branch_merge:
1859 1895 if failedmerge:
1860 1896 self.ui.status(_("There are unresolved merges,"
1861 1897 " you can redo the full merge using:\n"
1862 1898 " hg update -C %s\n"
1863 1899 " hg merge %s\n")
1864 1900 % (self.changelog.rev(p1),
1865 1901 self.changelog.rev(p2)))
1866 1902 else:
1867 1903 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1868 1904 elif failedmerge:
1869 1905 self.ui.status(_("There are unresolved merges with"
1870 1906 " locally modified files.\n"))
1871 1907
1872 1908 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1873 1909 return err
1874 1910
1875 1911 def merge3(self, fn, my, other, p1, p2):
1876 1912 """perform a 3-way merge in the working directory"""
1877 1913
1878 1914 def temp(prefix, node):
1879 1915 pre = "%s~%s." % (os.path.basename(fn), prefix)
1880 1916 (fd, name) = tempfile.mkstemp(prefix=pre)
1881 1917 f = os.fdopen(fd, "wb")
1882 1918 self.wwrite(fn, fl.read(node), f)
1883 1919 f.close()
1884 1920 return name
1885 1921
1886 1922 fl = self.file(fn)
1887 1923 base = fl.ancestor(my, other)
1888 1924 a = self.wjoin(fn)
1889 1925 b = temp("base", base)
1890 1926 c = temp("other", other)
1891 1927
1892 1928 self.ui.note(_("resolving %s\n") % fn)
1893 1929 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1894 1930 (fn, short(my), short(other), short(base)))
1895 1931
1896 1932 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1897 1933 or "hgmerge")
1898 1934 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1899 1935 environ={'HG_FILE': fn,
1900 1936 'HG_MY_NODE': p1,
1901 1937 'HG_OTHER_NODE': p2,
1902 1938 'HG_FILE_MY_NODE': hex(my),
1903 1939 'HG_FILE_OTHER_NODE': hex(other),
1904 1940 'HG_FILE_BASE_NODE': hex(base)})
1905 1941 if r:
1906 1942 self.ui.warn(_("merging %s failed!\n") % fn)
1907 1943
1908 1944 os.unlink(b)
1909 1945 os.unlink(c)
1910 1946 return r
1911 1947
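HGMERGE is handed the working file, the base and the other version as three arguments and reports failure through its exit status. A minimal sketch of such a tool under that contract alone; every name here is hypothetical:

#!/usr/bin/env python
# trivial three-way resolver: adopt whichever side actually changed,
# exit 1 on a genuine conflict so hg reports the merge as failed
import sys

def main():
    local, base, other = [open(p, 'rb').read() for p in sys.argv[1:4]]
    if local == base:                     # only the other side changed
        open(sys.argv[1], 'wb').write(other)
        return 0
    if other == base or other == local:   # only we changed, or same change
        return 0
    return 1                              # both sides changed: conflict

if __name__ == '__main__':
    sys.exit(main())
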
1912 1948 def verify(self):
1913 1949 filelinkrevs = {}
1914 1950 filenodes = {}
1915 1951 changesets = revisions = files = 0
1916 1952 errors = [0]
1917 1953 warnings = [0]
1918 1954 neededmanifests = {}
1919 1955
1920 1956 def err(msg):
1921 1957 self.ui.warn(msg + "\n")
1922 1958 errors[0] += 1
1923 1959
1924 1960 def warn(msg):
1925 1961 self.ui.warn(msg + "\n")
1926 1962 warnings[0] += 1
1927 1963
1928 1964 def checksize(obj, name):
1929 1965 d = obj.checksize()
1930 1966 if d[0]:
1931 1967 err(_("%s data length off by %d bytes") % (name, d[0]))
1932 1968 if d[1]:
1933 1969 err(_("%s index contains %d extra bytes") % (name, d[1]))
1934 1970
1935 1971 def checkversion(obj, name):
1936 1972 if obj.version != revlog.REVLOGV0:
1937 1973 if not revlogv1:
1938 1974 warn(_("warning: `%s' uses revlog format 1") % name)
1939 1975 elif revlogv1:
1940 1976 warn(_("warning: `%s' uses revlog format 0") % name)
1941 1977
1942 1978 revlogv1 = self.revlogversion != revlog.REVLOGV0
1943 1979 if self.ui.verbose or revlogv1 != self.revlogv1:
1944 1980 self.ui.status(_("repository uses revlog format %d\n") %
1945 1981 (revlogv1 and 1 or 0))
1946 1982
1947 1983 seen = {}
1948 1984 self.ui.status(_("checking changesets\n"))
1949 1985 checksize(self.changelog, "changelog")
1950 1986
1951 1987 for i in range(self.changelog.count()):
1952 1988 changesets += 1
1953 1989 n = self.changelog.node(i)
1954 1990 l = self.changelog.linkrev(n)
1955 1991 if l != i:
1956 1992 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1957 1993 if n in seen:
1958 1994 err(_("duplicate changeset at revision %d") % i)
1959 1995 seen[n] = 1
1960 1996
1961 1997 for p in self.changelog.parents(n):
1962 1998 if p not in self.changelog.nodemap:
1963 1999 err(_("changeset %s has unknown parent %s") %
1964 2000 (short(n), short(p)))
1965 2001 try:
1966 2002 changes = self.changelog.read(n)
1967 2003 except KeyboardInterrupt:
1968 2004 self.ui.warn(_("interrupted"))
1969 2005 raise
1970 2006 except Exception, inst:
1971 2007 err(_("unpacking changeset %s: %s") % (short(n), inst))
1972 2008 continue
1973 2009
1974 2010 neededmanifests[changes[0]] = n
1975 2011
1976 2012 for f in changes[3]:
1977 2013 filelinkrevs.setdefault(f, []).append(i)
1978 2014
1979 2015 seen = {}
1980 2016 self.ui.status(_("checking manifests\n"))
1981 2017 checkversion(self.manifest, "manifest")
1982 2018 checksize(self.manifest, "manifest")
1983 2019
1984 2020 for i in range(self.manifest.count()):
1985 2021 n = self.manifest.node(i)
1986 2022 l = self.manifest.linkrev(n)
1987 2023
1988 2024 if l < 0 or l >= self.changelog.count():
1989 2025 err(_("bad manifest link (%d) at revision %d") % (l, i))
1990 2026
1991 2027 if n in neededmanifests:
1992 2028 del neededmanifests[n]
1993 2029
1994 2030 if n in seen:
1995 2031 err(_("duplicate manifest at revision %d") % i)
1996 2032
1997 2033 seen[n] = 1
1998 2034
1999 2035 for p in self.manifest.parents(n):
2000 2036 if p not in self.manifest.nodemap:
2001 2037 err(_("manifest %s has unknown parent %s") %
2002 2038 (short(n), short(p)))
2003 2039
2004 2040 try:
2005 2041 delta = mdiff.patchtext(self.manifest.delta(n))
2006 2042 except KeyboardInterrupt:
2007 2043 self.ui.warn(_("interrupted"))
2008 2044 raise
2009 2045 except Exception, inst:
2010 2046 err(_("unpacking manifest %s: %s") % (short(n), inst))
2011 2047 continue
2012 2048
2013 2049 try:
2014 2050 ff = [ l.split('\0') for l in delta.splitlines() ]
2015 2051 for f, fn in ff:
2016 2052 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2017 2053 except (ValueError, TypeError), inst:
2018 2054 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2019 2055
2020 2056 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2021 2057
2022 2058 for m, c in neededmanifests.items():
2023 2059 err(_("Changeset %s refers to unknown manifest %s") %
2024 2060 (short(m), short(c)))
2025 2061 del neededmanifests
2026 2062
2027 2063 for f in filenodes:
2028 2064 if f not in filelinkrevs:
2029 2065 err(_("file %s in manifest but not in changesets") % f)
2030 2066
2031 2067 for f in filelinkrevs:
2032 2068 if f not in filenodes:
2033 2069 err(_("file %s in changeset but not in manifest") % f)
2034 2070
2035 2071 self.ui.status(_("checking files\n"))
2036 2072 ff = filenodes.keys()
2037 2073 ff.sort()
2038 2074 for f in ff:
2039 2075 if f == "/dev/null":
2040 2076 continue
2041 2077 files += 1
2042 2078 if not f:
2043 2079 err(_("file without name in manifest %s") % short(n))
2044 2080 continue
2045 2081 fl = self.file(f)
2046 2082 checkversion(fl, f)
2047 2083 checksize(fl, f)
2048 2084
2049 2085 nodes = {nullid: 1}
2050 2086 seen = {}
2051 2087 for i in range(fl.count()):
2052 2088 revisions += 1
2053 2089 n = fl.node(i)
2054 2090
2055 2091 if n in seen:
2056 2092 err(_("%s: duplicate revision %d") % (f, i))
2057 2093 if n not in filenodes[f]:
2058 2094 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2059 2095 else:
2060 2096 del filenodes[f][n]
2061 2097
2062 2098 flr = fl.linkrev(n)
2063 2099 if flr not in filelinkrevs.get(f, []):
2064 2100 err(_("%s:%s points to unexpected changeset %d")
2065 2101 % (f, short(n), flr))
2066 2102 else:
2067 2103 filelinkrevs[f].remove(flr)
2068 2104
2069 2105 # verify contents
2070 2106 try:
2071 2107 t = fl.read(n)
2072 2108 except KeyboardInterrupt:
2073 2109 self.ui.warn(_("interrupted"))
2074 2110 raise
2075 2111 except Exception, inst:
2076 2112 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2077 2113
2078 2114 # verify parents
2079 2115 (p1, p2) = fl.parents(n)
2080 2116 if p1 not in nodes:
2081 2117 err(_("file %s:%s unknown parent 1 %s") %
2082 2118 (f, short(n), short(p1)))
2083 2119 if p2 not in nodes:
2084 2120 err(_("file %s:%s unknown parent 2 %s") %
2085 2121 (f, short(n), short(p2)))
2086 2122 nodes[n] = 1
2087 2123
2088 2124 # cross-check
2089 2125 for node in filenodes[f]:
2090 2126 err(_("node %s in manifests not in %s") % (hex(node), f))
2091 2127
2092 2128 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2093 2129 (files, changesets, revisions))
2094 2130
2095 2131 if warnings[0]:
2096 2132 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2097 2133 if errors[0]:
2098 2134 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2099 2135 return 1
2100 2136
2101 2137 # used to avoid circular references so destructors work
2102 2138 def aftertrans(base):
2103 2139 p = base
2104 2140 def a():
2105 2141 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2106 2142 util.rename(os.path.join(p, "journal.dirstate"),
2107 2143 os.path.join(p, "undo.dirstate"))
2108 2144 return a
2109 2145
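aftertrans exists so the transaction can hold a plain closure rather than a reference back to the repository, keeping destructors runnable. A hedged sketch of the wiring; the transaction signature here is an assumption, not a quote:

# on close, the transaction runs the closure, renaming the journal to
# "undo" so a later rollback can find it (hypothetical constructor args)
tr = transaction.transaction(ui.warn, opener, "journal", aftertrans(base))
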
@@ -1,162 +1,187 b''
1 1 # sshrepo.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from remoterepo import *
10 10 from i18n import gettext as _
11 11 from demandload import *
12 12 demandload(globals(), "hg os re stat util")
13 13
14 14 class sshrepository(remoterepository):
15 15 def __init__(self, ui, path):
16 16 self.url = path
17 17 self.ui = ui
18 18
19 19 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
20 20 if not m:
21 21 raise hg.RepoError(_("couldn't parse destination %s") % path)
22 22
23 23 self.user = m.group(2)
24 24 self.host = m.group(3)
25 25 self.port = m.group(5)
26 26 self.path = m.group(7) or "."
27 27
28 28 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
29 29 args = self.port and ("%s -p %s") % (args, self.port) or args
30 30
31 31 sshcmd = self.ui.config("ui", "ssh", "ssh")
32 32 remotecmd = self.ui.config("ui", "remotecmd", "hg")
33 33 cmd = '%s %s "%s -R %s serve --stdio"'
34 34 cmd = cmd % (sshcmd, args, remotecmd, self.path)
35 35
36 36 ui.note('running %s\n' % cmd)
37 37 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
38 38
39 39 # skip any noise generated by remote shell
40 40 self.do_cmd("hello")
41 41 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
42 42 lines = ["", "dummy"]
43 43 max_noise = 500
44 44 while lines[-1] and max_noise:
45 45 l = r.readline()
46 46 self.readerr()
47 47 if lines[-1] == "1\n" and l == "\n":
48 48 break
49 49 if l:
50 50 ui.debug(_("remote: "), l)
51 51 lines.append(l)
52 52 max_noise -= 1
53 53 else:
54 54 if l:
55 55 ui.debug(_("remote: "), l)
56 56 raise hg.RepoError(_("no response from remote hg"))
57 57
58 58 self.capabilities = ()
59 59 lines.reverse()
60 60 for l in lines:
61 61 if l.startswith("capabilities:"):
62 62 self.capabilities = l[:-1].split(":")[1].split()
63 63 break
64 64
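The advertised capability list is what lets a client choose the new lock-free push path. A hedged sketch of the gating a caller might do; the surrounding push logic is paraphrased, not quoted:

# prefer unbundle, which never holds the server lock across the network;
# fall back to the old lock + addchangegroup dance for older servers
if 'unbundle' in remote.capabilities:
    ret = remote.unbundle(cg, remote_heads, 'push')
else:
    lock = remote.lock()
    try:
        ret = remote.addchangegroup(cg, 'push')
    finally:
        lock.release()
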
65 65 def readerr(self):
66 66 while 1:
67 67 size = util.fstat(self.pipee).st_size
68 68 if size == 0: break
69 69 l = self.pipee.readline()
70 70 if not l: break
71 71 self.ui.status(_("remote: "), l)
72 72
73 73 def __del__(self):
74 74 try:
75 75 self.pipeo.close()
76 76 self.pipei.close()
77 77 # read the error descriptor until EOF
78 78 for l in self.pipee:
79 79 self.ui.status(_("remote: "), l)
80 80 self.pipee.close()
81 81 except:
82 82 pass
83 83
84 84 def dev(self):
85 85 return -1
86 86
87 87 def do_cmd(self, cmd, **args):
88 88 self.ui.debug(_("sending %s command\n") % cmd)
89 89 self.pipeo.write("%s\n" % cmd)
90 90 for k, v in args.items():
91 91 self.pipeo.write("%s %d\n" % (k, len(v)))
92 92 self.pipeo.write(v)
93 93 self.pipeo.flush()
94 94
95 95 return self.pipei
96 96
97 97 def call(self, cmd, **args):
98 98 r = self.do_cmd(cmd, **args)
99 99 l = r.readline()
100 100 self.readerr()
101 101 try:
102 102 l = int(l)
103 103 except:
104 104 raise hg.RepoError(_("unexpected response '%s'") % l)
105 105 return r.read(l)
106 106
107 107 def lock(self):
108 108 self.call("lock")
109 109 return remotelock(self)
110 110
111 111 def unlock(self):
112 112 self.call("unlock")
113 113
114 114 def heads(self):
115 115 d = self.call("heads")
116 116 try:
117 117 return map(bin, d[:-1].split(" "))
118 118 except:
119 119 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
120 120
121 121 def branches(self, nodes):
122 122 n = " ".join(map(hex, nodes))
123 123 d = self.call("branches", nodes=n)
124 124 try:
125 125 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
126 126 return br
127 127 except:
128 128 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
129 129
130 130 def between(self, pairs):
131 131 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
132 132 d = self.call("between", pairs=n)
133 133 try:
134 134 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
135 135 return p
136 136 except:
137 137 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
138 138
139 139 def changegroup(self, nodes, kind):
140 140 n = " ".join(map(hex, nodes))
141 141 f = self.do_cmd("changegroup", roots=n)
142 142 return self.pipei
143 143
144 def unbundle(self, cg, heads, source):
145 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
146 if d:
147 raise hg.RepoError(_("push refused: %s") % d)
148
149 while 1:
150 d = cg.read(4096)
151 if not d: break
152 self.pipeo.write(str(len(d)) + '\n')
153 self.pipeo.write(d)
154 self.readerr()
155
156 self.pipeo.write('0\n')
157 self.pipeo.flush()
158
159 self.readerr()
160 d = self.pipei.readline()
161 if d != '\n':
162 return 1
163
164 l = int(self.pipei.readline())
165 r = self.pipei.read(l)
166 if not r:
167 return 1
168 return int(r)
169
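The payload loop above frames the bundle as an ASCII decimal length, a newline, then that many raw bytes, with a bare "0\n" closing the stream; a compact sketch of the sender, mirroring the reader in sshserver.do_unbundle:

def send_bundle(pipe, cg):
    # length-prefixed chunks, terminated by a zero-length frame
    while 1:
        d = cg.read(4096)
        if not d:
            break
        pipe.write("%d\n" % len(d))
        pipe.write(d)
    pipe.write("0\n")
    pipe.flush()
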
144 170 def addchangegroup(self, cg, source):
145 171 d = self.call("addchangegroup")
146 172 if d:
147 raise hg.RepoError(_("push refused: %s"), d)
148
173 raise hg.RepoError(_("push refused: %s") % d)
149 174 while 1:
150 175 d = cg.read(4096)
151 176 if not d: break
152 177 self.pipeo.write(d)
153 178 self.readerr()
154 179
155 180 self.pipeo.flush()
156 181
157 182 self.readerr()
158 183 l = int(self.pipei.readline())
159 184 r = self.pipei.read(l)
160 185 if not r:
161 186 return 1
162 187 return int(r)
@@ -1,113 +1,169 b''
1 1 # sshserver.py - ssh protocol server support for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from i18n import gettext as _
10 10 from node import *
11 demandload(globals(), "sys util")
11 demandload(globals(), "os sys tempfile util")
12 12
13 13 class sshserver(object):
14 14 def __init__(self, ui, repo):
15 15 self.ui = ui
16 16 self.repo = repo
17 17 self.lock = None
18 18 self.fin = sys.stdin
19 19 self.fout = sys.stdout
20 20
21 21 sys.stdout = sys.stderr
22 22
23 23 # Prevent insertion/deletion of CRs
24 24 util.set_binary(self.fin)
25 25 util.set_binary(self.fout)
26 26
27 27 def getarg(self):
28 28 argline = self.fin.readline()[:-1]
29 29 arg, l = argline.split()
30 30 val = self.fin.read(int(l))
31 31 return arg, val
32 32
33 33 def respond(self, v):
34 34 self.fout.write("%d\n" % len(v))
35 35 self.fout.write(v)
36 36 self.fout.flush()
37 37
38 38 def serve_forever(self):
39 39 while self.serve_one(): pass
40 40 sys.exit(0)
41 41
42 42 def serve_one(self):
43 43 cmd = self.fin.readline()[:-1]
44 44 if cmd:
45 45 impl = getattr(self, 'do_' + cmd, None)
46 46 if impl: impl()
47 47 else: self.respond("")
48 48 return cmd != ''
49 49
50 50 def do_heads(self):
51 51 h = self.repo.heads()
52 52 self.respond(" ".join(map(hex, h)) + "\n")
53 53
54 54 def do_hello(self):
55 55 '''the hello command returns a set of lines describing various
56 56 interesting things about the server, in an RFC822-like format.
57 57 Currently the only one defined is "capabilities", which
58 58 consists of a line in the form:
59 59
60 60 capabilities: space separated list of tokens
61 61 '''
62 62
63 r = "capabilities:\n"
63 r = "capabilities: unbundle\n"
64 64 self.respond(r)
65 65
66 66 def do_lock(self):
67 '''DEPRECATED - allowing remote client to lock repo is not safe'''
68
67 69 self.lock = self.repo.lock()
68 70 self.respond("")
69 71
70 72 def do_unlock(self):
73 '''DEPRECATED'''
74
71 75 if self.lock:
72 76 self.lock.release()
73 77 self.lock = None
74 78 self.respond("")
75 79
76 80 def do_branches(self):
77 81 arg, nodes = self.getarg()
78 82 nodes = map(bin, nodes.split(" "))
79 83 r = []
80 84 for b in self.repo.branches(nodes):
81 85 r.append(" ".join(map(hex, b)) + "\n")
82 86 self.respond("".join(r))
83 87
84 88 def do_between(self):
85 89 arg, pairs = self.getarg()
86 90 pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
87 91 r = []
88 92 for b in self.repo.between(pairs):
89 93 r.append(" ".join(map(hex, b)) + "\n")
90 94 self.respond("".join(r))
91 95
92 96 def do_changegroup(self):
93 97 nodes = []
94 98 arg, roots = self.getarg()
95 99 nodes = map(bin, roots.split(" "))
96 100
97 101 cg = self.repo.changegroup(nodes, 'serve')
98 102 while True:
99 103 d = cg.read(4096)
100 104 if not d:
101 105 break
102 106 self.fout.write(d)
103 107
104 108 self.fout.flush()
105 109
106 110 def do_addchangegroup(self):
111 '''DEPRECATED'''
112
107 113 if not self.lock:
108 114 self.respond("not locked")
109 115 return
110 116
111 117 self.respond("")
112 118 r = self.repo.addchangegroup(self.fin, 'serve')
113 119 self.respond(str(r))
120
121 def do_unbundle(self):
122 their_heads = self.getarg()[1].split()
123
124 def check_heads():
125 heads = map(hex, self.repo.heads())
126 return their_heads == [hex('force')] or their_heads == heads
127
128 # fail early if possible
129 if not check_heads():
130 self.respond(_('unsynced changes'))
131 return
132
133 self.respond('')
134
135 # write bundle data to temporary file because it can be big
136
137 try:
138 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
139 fp = os.fdopen(fd, 'wb+')
140
141 count = int(self.fin.readline())
142 while count:
143 fp.write(self.fin.read(count))
144 count = int(self.fin.readline())
145
146 was_locked = self.lock is not None
147 if not was_locked:
148 self.lock = self.repo.lock()
149 try:
150 if not check_heads():
151 # someone else committed/pushed/unbundled while we
152 # were transferring data
153 self.respond(_('unsynced changes'))
154 return
155 self.respond('')
156
157 # push can proceed
158
159 fp.seek(0)
160 r = self.repo.addchangegroup(fp, 'serve')
161 self.respond(str(r))
162 finally:
163 if not was_locked:
164 self.lock.release()
165 self.lock = None
166 finally:
167 fp.close()
168 os.unlink(tempname)
169
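check_heads treats a single pseudo-head, hex('force'), as a wildcard that disables the sync check. A tiny sketch of what a forcing client would send, assuming it pushes the literal string 'force' through the same hex encoding as real nodes:

from binascii import hexlify as hex      # stand-in for mercurial.node.hex

# a forced push advertises one fake head; the server special-cases it
heads_arg = ' '.join(map(hex, ['force']))
assert heads_arg == '666f726365'
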
@@ -1,70 +1,83 b''
1 1 #!/bin/sh
2 2
3 3 # This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 cat <<'EOF' > dummyssh
6 6 #!/bin/sh
7 7 # this attempts to deal with relative pathnames
8 8 cd `dirname $0`
9 9
10 10 # check for proper args
11 11 if [ $1 != "user@dummy" ] ; then
12 12 exit -1
13 13 fi
14 14
15 15 # check that we're in the right directory
16 16 if [ ! -x dummyssh ] ; then
17 17 exit -1
18 18 fi
19 19
20 20 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
21 21 $2
22 22 EOF
23 23 chmod +x dummyssh
24 24
25 25 echo "# creating 'remote'"
26 26 hg init remote
27 27 cd remote
28 28 echo this > foo
29 29 hg ci -A -m "init" -d "1000000 0" foo
30 30
31 31 cd ..
32 32
33 33 echo "# clone remote"
34 34 hg clone -e ./dummyssh ssh://user@dummy/remote local
35 35
36 36 echo "# verify"
37 37 cd local
38 38 hg verify
39 39
40 40 echo "# empty default pull"
41 41 hg paths
42 42 hg pull -e ../dummyssh
43 43
44 44 echo "# local change"
45 45 echo bleah > foo
46 46 hg ci -m "add" -d "1000000 0"
47 47
48 48 echo "# updating rc"
49 49 echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
50 50 echo "[ui]" >> .hg/hgrc
51 51 echo "ssh = ../dummyssh" >> .hg/hgrc
52 52
53 53 echo "# find outgoing"
54 54 hg out ssh://user@dummy/remote
55 55
56 56 echo "# find incoming on the remote side"
57 57 hg incoming -R ../remote -e ../dummyssh ssh://user@dummy/local
58 58
59 59 echo "# push"
60 60 hg push
61 61
62 62 cd ../remote
63 63
64 64 echo "# check remote tip"
65 65 hg tip
66 66 hg verify
67 67 hg cat foo
68 68
69 echo z > z
70 hg ci -A -m z -d '1000001 0' z
71
72 cd ../local
73 echo r > r
74 hg ci -A -m z -d '1000002 0' r
75
76 echo "# push should fail"
77 hg push
78
79 echo "# push should succeed"
80 hg push -f
81
69 82 cd ..
70 83 cat dummylog
@@ -1,62 +1,76 b''
1 1 # creating 'remote'
2 2 # clone remote
3 3 requesting all changes
4 4 adding changesets
5 5 adding manifests
6 6 adding file changes
7 7 added 1 changesets with 1 changes to 1 files
8 8 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
9 9 # verify
10 10 checking changesets
11 11 checking manifests
12 12 crosschecking files in changesets and manifests
13 13 checking files
14 14 1 files, 1 changesets, 1 total revisions
15 15 # empty default pull
16 16 default = ssh://user@dummy/remote
17 17 pulling from ssh://user@dummy/remote
18 18 searching for changes
19 19 no changes found
20 20 # local change
21 21 # updating rc
22 22 # find outgoing
23 23 searching for changes
24 24 changeset: 1:c54836a570be
25 25 tag: tip
26 26 user: test
27 27 date: Mon Jan 12 13:46:40 1970 +0000
28 28 summary: add
29 29
30 30 # find incoming on the remote side
31 31 searching for changes
32 32 changeset: 1:c54836a570be
33 33 tag: tip
34 34 user: test
35 35 date: Mon Jan 12 13:46:40 1970 +0000
36 36 summary: add
37 37
38 38 # push
39 39 pushing to ssh://user@dummy/remote
40 40 searching for changes
41 41 remote: adding changesets
42 42 remote: adding manifests
43 43 remote: adding file changes
44 44 remote: added 1 changesets with 1 changes to 1 files
45 45 # check remote tip
46 46 changeset: 1:c54836a570be
47 47 tag: tip
48 48 user: test
49 49 date: Mon Jan 12 13:46:40 1970 +0000
50 50 summary: add
51 51
52 52 checking changesets
53 53 checking manifests
54 54 crosschecking files in changesets and manifests
55 55 checking files
56 56 1 files, 2 changesets, 2 total revisions
57 57 bleah
58 # push should fail
59 pushing to ssh://user@dummy/remote
60 searching for changes
61 abort: unsynced remote changes!
62 (did you forget to sync? use push -f to force)
63 # push should succeed
64 pushing to ssh://user@dummy/remote
65 searching for changes
66 remote: adding changesets
67 remote: adding manifests
68 remote: adding file changes
69 remote: added 1 changesets with 1 changes to 1 files
58 70 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
59 71 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
60 72 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
61 73 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
62 74 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
75 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
76 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5: