##// END OF EJS Templates
If we can't write the branch cache, fail quietly.
Matt Mackall -
r3452:fcf14d87 default
parent child Browse files
Show More
@@ -1,1814 +1,1817 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
    def __del__(self):
        # Break the reference to any pending transaction so it can be
        # collected along with the repository object.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        When path is None, walk upward from the current directory until
        a .hg directory is found.  Raises repo.RepoError when no
        repository exists (or, with create, when one already does).
        """
        repo.repository.__init__(self)
        if not path:
            # search upward for a directory containing .hg
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:  # reached the filesystem root
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener/wopener open files relative to .hg and the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without a per-repo hgrc is fine
            pass

        # determine revlog format version and flags from configuration
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not loaded yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
    def url(self):
        # local repositories are identified by a file: URL
        return 'file:' + self.root
92
92
    def hook(self, name, throw=False, **args):
        """Run every [hooks] entry whose key (before any '.suffix')
        equals name.  Python hooks are 'python:dotted.path' values;
        anything else runs as a shell command with HG_* environment
        variables built from args.  Returns the last truthy hook
        result; with throw=True a failing hook raises instead."""
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the remaining dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: args are exported as HG_<KEY> env variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        # run hooks in a stable (sorted-by-name) order
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
173
173
    # characters that may not appear in a tag name (see tag())
    tag_disallowed = ':\r\n'
175
175
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags go to .hg/localtags and are never committed
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to commit over uncommitted .hgtags changes
        # (NOTE(review): first five status lists are checked — presumably
        # modified/added/removed/deleted/unknown; confirm against status())
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
216
216
217 def tags(self):
217 def tags(self):
218 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
219 if not self.tagscache:
219 if not self.tagscache:
220 self.tagscache = {}
220 self.tagscache = {}
221
221
222 def parsetag(line, context):
222 def parsetag(line, context):
223 if not line:
223 if not line:
224 return
224 return
225 s = l.split(" ", 1)
225 s = l.split(" ", 1)
226 if len(s) != 2:
226 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
228 return
229 node, key = s
229 node, key = s
230 key = key.strip()
230 key = key.strip()
231 try:
231 try:
232 bin_n = bin(node)
232 bin_n = bin(node)
233 except TypeError:
233 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
235 (context, node))
236 return
236 return
237 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
239 (context, key))
240 return
240 return
241 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
242
242
243 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
245 # taking precedence
245 # taking precedence
246 heads = self.heads()
246 heads = self.heads()
247 heads.reverse()
247 heads.reverse()
248 fl = self.file(".hgtags")
248 fl = self.file(".hgtags")
249 for node in heads:
249 for node in heads:
250 change = self.changelog.read(node)
250 change = self.changelog.read(node)
251 rev = self.changelog.rev(node)
251 rev = self.changelog.rev(node)
252 fn, ff = self.manifest.find(change[0], '.hgtags')
252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 if fn is None: continue
253 if fn is None: continue
254 count = 0
254 count = 0
255 for l in fl.read(fn).splitlines():
255 for l in fl.read(fn).splitlines():
256 count += 1
256 count += 1
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 (rev, short(node), count))
258 (rev, short(node), count))
259 try:
259 try:
260 f = self.opener("localtags")
260 f = self.opener("localtags")
261 count = 0
261 count = 0
262 for l in f:
262 for l in f:
263 count += 1
263 count += 1
264 parsetag(l, _("localtags, line %d") % count)
264 parsetag(l, _("localtags, line %d") % count)
265 except IOError:
265 except IOError:
266 pass
266 pass
267
267
268 self.tagscache['tip'] = self.changelog.tip()
268 self.tagscache['tip'] = self.changelog.tip()
269
269
270 return self.tagscache
270 return self.tagscache
271
271
272 def tagslist(self):
272 def tagslist(self):
273 '''return a list of tags ordered by revision'''
273 '''return a list of tags ordered by revision'''
274 l = []
274 l = []
275 for t, n in self.tags().items():
275 for t, n in self.tags().items():
276 try:
276 try:
277 r = self.changelog.rev(n)
277 r = self.changelog.rev(n)
278 except:
278 except:
279 r = -2 # sort to the beginning of the list if unknown
279 r = -2 # sort to the beginning of the list if unknown
280 l.append((r, t, n))
280 l.append((r, t, n))
281 l.sort()
281 l.sort()
282 return [(t, n) for r, t, n in l]
282 return [(t, n) for r, t, n in l]
283
283
284 def nodetags(self, node):
284 def nodetags(self, node):
285 '''return the tags associated with a node'''
285 '''return the tags associated with a node'''
286 if not self.nodetagscache:
286 if not self.nodetagscache:
287 self.nodetagscache = {}
287 self.nodetagscache = {}
288 for t, n in self.tags().items():
288 for t, n in self.tags().items():
289 self.nodetagscache.setdefault(n, []).append(t)
289 self.nodetagscache.setdefault(n, []).append(t)
290 return self.nodetagscache.get(node, [])
290 return self.nodetagscache.get(node, [])
291
291
    def branchtags(self):
        """Return a map of branch name -> node, reading it from
        .hg/branches.cache when that cache is still valid and scanning
        any changesets newer than the cache otherwise.  The result is
        memoized in self.branchcache."""
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        try:
            f = self.opener("branches.cache")
            # first line records "<hex tip node> <tip rev>" at write time
            last, lrev = f.readline().rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
                # remaining lines are "<hex node> <branch label>"
                for l in f:
                    node, label = l.rstrip().split(" ", 1)
                    self.branchcache[label] = bin(node)
            else: # invalidate the cache
                last, lrev = nullid, -1
            f.close()
        except IOError:
            # no cache file yet: rebuild from scratch
            last, lrev = nullid, -1

        tip = self.changelog.count() - 1
        if lrev != tip:
            # scan changesets newer than the cache, then refresh it
            for r in xrange(lrev + 1, tip + 1):
                c = self.changectx(r)
                b = c.branch()
                if b:
                    self.branchcache[b] = c.node()
            self._writebranchcache()

        return self.branchcache
323
323
    def _writebranchcache(self):
        """Persist self.branchcache to .hg/branches.cache.

        If the cache can't be written (e.g. a read-only repository),
        fail quietly -- the cache is only an optimization."""
        try:
            f = self.opener("branches.cache", "w")
            t = self.changelog.tip()
            # header line: tip node and tip revision, used as a validity
            # check when the cache is read back
            f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
            for label, node in self.branchcache.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass
330
333
    def lookup(self, key):
        """Resolve key ('.', a tag, a branch name, or anything the
        changelog can resolve) to a changelog node.  Raises
        repo.RepoError for unknown revisions."""
        if key == '.':
            # '.' means the first parent of the working directory
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        try:
            return self.changelog.lookup(key)
        except:
            raise repo.RepoError(_("unknown revision '%s'") % key)
344
347
    def dev(self):
        # device number of the repository's .hg directory
        return os.lstat(self.path).st_dev
347
350
    def local(self):
        # this is a local (filesystem) repository
        return True
350
353
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
353
356
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
356
359
357 def file(self, f):
360 def file(self, f):
358 if f[0] == '/':
361 if f[0] == '/':
359 f = f[1:]
362 f = f[1:]
360 return filelog.filelog(self.opener, f, self.revlogversion)
363 return filelog.filelog(self.opener, f, self.revlogversion)
361
364
    def changectx(self, changeid=None):
        # change context for the given changeset id
        return context.changectx(self, changeid)
364
367
    def workingctx(self):
        # context object for the working directory
        return context.workingctx(self)
367
370
368 def parents(self, changeid=None):
371 def parents(self, changeid=None):
369 '''
372 '''
370 get list of changectxs for parents of changeid or working directory
373 get list of changectxs for parents of changeid or working directory
371 '''
374 '''
372 if changeid is None:
375 if changeid is None:
373 pl = self.dirstate.parents()
376 pl = self.dirstate.parents()
374 else:
377 else:
375 n = self.changelog.lookup(changeid)
378 n = self.changelog.lookup(changeid)
376 pl = self.changelog.parents(n)
379 pl = self.changelog.parents(n)
377 if pl[1] == nullid:
380 if pl[1] == nullid:
378 return [self.changectx(pl[0])]
381 return [self.changectx(pl[0])]
379 return [self.changectx(pl[0]), self.changectx(pl[1])]
382 return [self.changectx(pl[0]), self.changectx(pl[1])]
380
383
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
385
388
    def getcwd(self):
        # current directory, as tracked by the dirstate
        return self.dirstate.getcwd()
388
391
    def wfile(self, f, mode='r'):
        # open f relative to the working directory
        return self.wopener(f, mode)
391
394
392 def wread(self, filename):
395 def wread(self, filename):
393 if self.encodepats == None:
396 if self.encodepats == None:
394 l = []
397 l = []
395 for pat, cmd in self.ui.configitems("encode"):
398 for pat, cmd in self.ui.configitems("encode"):
396 mf = util.matcher(self.root, "", [pat], [], [])[1]
399 mf = util.matcher(self.root, "", [pat], [], [])[1]
397 l.append((mf, cmd))
400 l.append((mf, cmd))
398 self.encodepats = l
401 self.encodepats = l
399
402
400 data = self.wopener(filename, 'r').read()
403 data = self.wopener(filename, 'r').read()
401
404
402 for mf, cmd in self.encodepats:
405 for mf, cmd in self.encodepats:
403 if mf(filename):
406 if mf(filename):
404 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
407 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
405 data = util.filter(data, cmd)
408 data = util.filter(data, cmd)
406 break
409 break
407
410
408 return data
411 return data
409
412
410 def wwrite(self, filename, data, fd=None):
413 def wwrite(self, filename, data, fd=None):
411 if self.decodepats == None:
414 if self.decodepats == None:
412 l = []
415 l = []
413 for pat, cmd in self.ui.configitems("decode"):
416 for pat, cmd in self.ui.configitems("decode"):
414 mf = util.matcher(self.root, "", [pat], [], [])[1]
417 mf = util.matcher(self.root, "", [pat], [], [])[1]
415 l.append((mf, cmd))
418 l.append((mf, cmd))
416 self.decodepats = l
419 self.decodepats = l
417
420
418 for mf, cmd in self.decodepats:
421 for mf, cmd in self.decodepats:
419 if mf(filename):
422 if mf(filename):
420 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
423 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
421 data = util.filter(data, cmd)
424 data = util.filter(data, cmd)
422 break
425 break
423
426
424 if fd:
427 if fd:
425 return fd.write(data)
428 return fd.write(data)
426 return self.wopener(filename, 'w').write(data)
429 return self.wopener(filename, 'w').write(data)
427
430
    def transaction(self):
        """Return a transaction object; if one is already running,
        return a nested handle on it instead of starting a new one."""
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
445
448
    def recover(self):
        """Roll back an interrupted transaction, if a journal file
        exists.  Returns True when a rollback was performed."""
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # re-read store data invalidated by the rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
456
459
    def rollback(self, wlock=None):
        """Undo the last completed transaction using the saved undo
        journal and undo.dirstate files, if present."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            # restore the dirstate saved before the transaction
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
469
472
    def wreload(self):
        # re-read working directory state from disk
        self.dirstate.read()
472
475
    def reload(self):
        # re-read store data and drop the caches derived from it
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
478
481
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file.  When wait is set and the lock
        is held, warn and retry with a timeout (ui.timeout config,
        default 600s); otherwise re-raise lock.LockHeld.  Calls
        acquirefn once the lock is obtained."""
        try:
            # first attempt: non-blocking (timeout 0)
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
495
498
    def lock(self, wait=1):
        # store lock; cached store data is reloaded on acquisition
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
499
502
    def wlock(self, wait=1):
        # working directory lock; the dirstate is written on release
        # and re-read on acquisition
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
504
507
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new (or existing, if unmodified) filelog node for
        fn; appends fn to changelist only when a new revision is added.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # file parents come from the two manifests (nullid if absent)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file was copied/renamed: record the copy source and
            # the source revision in the filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
544
547
def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
    """Record a changeset containing exactly the given files, bypassing
    the usual status-driven commit machinery.

    Parents default to the current dirstate parents.  Files that cannot
    be read from the working directory are treated as removals.  The
    dirstate is only updated when committing on top of its current
    first parent.
    """
    orig_parent = self.dirstate.parents()[0] or nullid
    p1 = p1 or self.dirstate.parents()[0] or nullid
    p2 = p2 or self.dirstate.parents()[1] or nullid
    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    mf1 = self.manifest.read(c1[0]).copy()
    mf2 = self.manifest.read(c2[0])
    changed = []
    removed = []

    # only touch the dirstate when we commit on its first parent
    update_dirstate = (orig_parent == p1)

    if not wlock:
        wlock = self.wlock()
    l = self.lock()
    tr = self.transaction()
    linkrev = self.changelog.count()
    for f in files:
        try:
            mf1[f] = self.filecommit(f, mf1, mf2, linkrev, tr, changed)
            mf1.set(f, util.is_exec(self.wjoin(f), mf1.execf(f)))
        except IOError:
            # unreadable in the working dir: record it as removed
            try:
                del mf1[f]
                if update_dirstate:
                    self.dirstate.forget([f])
                removed.append(f)
            except:
                # deleted from p2?
                pass

    mnode = self.manifest.add(mf1, tr, linkrev, c1[0], c2[0])
    user = user or self.ui.username()
    n = self.changelog.add(mnode, changed + removed, text,
                           tr, p1, p2, user, date)
    tr.close()
    if update_dirstate:
        self.dirstate.setparents(n, nullid)
def commit(self, files=None, text="", user=None, date=None,
           match=util.always, force=False, lock=None, wlock=None,
           force_editor=False):
    """Commit changes to the repository and return the new changeset node
    (or None when there is nothing to commit or the log message is empty).

    With an explicit file list, only those files are considered;
    otherwise the commit set comes from self.status().  When no message
    is given (or force_editor is set), the user's editor is invoked from
    the repository root.  Fires the precommit, pretxncommit and commit
    hooks around the transaction.
    """
    commit = []
    remove = []
    changed = []

    if files:
        for f in files:
            s = self.dirstate.state(f)
            if s in 'nmai':
                commit.append(f)
            elif s == 'r':
                remove.append(f)
            else:
                self.ui.warn(_("%s not tracked!\n") % f)
    else:
        modified, added, removed, deleted, unknown = self.status(match=match)[:5]
        commit = modified + added
        remove = removed

    p1, p2 = self.dirstate.parents()
    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0]).copy()
    m2 = self.manifest.read(c2[0])

    branchname = self.workingctx().branch()
    oldname = c1[5].get("branch", "")

    # nothing to do unless files changed, a merge is pending, force is
    # set, or the branch name differs from the first parent's
    if not commit and not remove and not force and p2 == nullid and \
       branchname == oldname:
        self.ui.status(_("nothing changed\n"))
        return None

    xp1 = hex(p1)
    if p2 == nullid:
        xp2 = ''
    else:
        xp2 = hex(p2)

    self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

    if not wlock:
        wlock = self.wlock()
    if not lock:
        lock = self.lock()
    tr = self.transaction()

    # check in files
    new = {}
    linkrev = self.changelog.count()
    commit.sort()
    for f in commit:
        self.ui.note(f + "\n")
        try:
            new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
            m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
        except IOError:
            self.ui.warn(_("trouble committing %s!\n") % f)
            raise

    # update manifest
    m1.update(new)
    for f in remove:
        if f in m1:
            del m1[f]
    mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

    # add changeset
    new = sorted(new)

    user = user or self.ui.username()
    if not text or force_editor:
        edittext = []
        if text:
            edittext.append(text)
        edittext.append("")
        if p2 != nullid:
            edittext.append("HG: branch merge")
        edittext.extend(["HG: changed %s" % f for f in changed])
        edittext.extend(["HG: removed %s" % f for f in remove])
        if not changed and not remove:
            edittext.append("HG: no files changed")
        edittext.append("")
        # run editor in the repository root
        olddir = os.getcwd()
        os.chdir(self.root)
        text = self.ui.edit("\n".join(edittext), user)
        os.chdir(olddir)

    # strip leading blank lines and trailing whitespace from the message
    lines = [line.rstrip() for line in text.rstrip().splitlines()]
    while lines and not lines[0]:
        del lines[0]
    if not lines:
        return None
    text = '\n'.join(lines)
    extra = {}
    if branchname:
        extra["branch"] = branchname
    n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                           user, date, extra)
    self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
              parent2=xp2)
    tr.close()

    self.dirstate.setparents(n)
    self.dirstate.update(new, "n")
    self.dirstate.forget(remove)

    self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
    return n
def walk(self, node=None, files=[], match=util.always, badmatch=None):
    """Yield (src, filename) pairs for files of interest.

    With a node, walks that revision's manifest, yielding 'm' for each
    matched manifest file; requested files absent from the manifest
    yield 'b' when badmatch accepts them, otherwise a warning is
    printed.  Without a node, the walk is delegated to the dirstate.

    NOTE(review): files=[] is a shared mutable default; it appears to be
    only read here, but confirm dirstate.walk does not mutate it.
    """
    if node:
        fdict = dict.fromkeys(files)
        for fn in self.manifest.read(self.changelog.read(node)[0]):
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    del fdict[ffn]
                    break
            if match(fn):
                yield 'm', fn
        # whatever is left in fdict was asked for but never seen
        for fn in fdict:
            if badmatch and badmatch(fn):
                if match(fn):
                    yield 'b', fn
            else:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
    else:
        for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
            yield src, fn
def status(self, node1=None, node2=None, files=[], match=util.always,
           wlock=None, list_ignored=False, list_clean=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean).
    """

    def fcmp(fn, mf):
        # compare working-directory contents against the manifest entry
        t1 = self.wread(fn)
        return self.file(fn).cmp(mf.get(fn, nullid), t1)

    def mfmatches(node):
        # manifest of `node`, restricted to files accepted by `match`
        change = self.changelog.read(node)
        mf = self.manifest.read(change[0]).copy()
        for fn in list(mf.keys()):
            if not match(fn):
                del mf[fn]
        return mf

    modified, added, removed, deleted, unknown = [], [], [], [], []
    ignored, clean = [], []

    compareworking = False
    if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
        compareworking = True

    if not compareworking:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

    # are we comparing the working directory?
    if not node2:
        if not wlock:
            # best-effort lock: without it we simply skip the dirstate
            # rewrite of resolved lookups below
            try:
                wlock = self.wlock(wait=0)
            except lock.LockException:
                wlock = None
        (lookup, modified, added, removed, deleted, unknown,
         ignored, clean) = self.dirstate.status(files, match,
                                                list_ignored, list_clean)

        # are we comparing working dir against its parent?
        if compareworking:
            if lookup:
                # do a full compare of any files that might have changed
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup:
                    if fcmp(f, mf2):
                        modified.append(f)
                    else:
                        clean.append(f)
                        if wlock is not None:
                            self.dirstate.update([f], "n")
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            # XXX: create it in dirstate.py ?
            mf2 = mfmatches(self.dirstate.parents()[0])
            for f in lookup + modified + added:
                mf2[f] = ""
                mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
            for f in removed:
                if f in mf2:
                    del mf2[f]
    else:
        # we are comparing two revisions
        mf2 = mfmatches(node2)

    if not compareworking:
        # flush lists from dirstate before comparing manifests
        modified, added, clean = [], [], []

        # make sure to sort the files so we talk to the disk in a
        # reasonable order
        for fn in sorted(mf2.keys()):
            if fn in mf1:
                if mf1.flags(fn) != mf2.flags(fn) or \
                   (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                    modified.append(fn)
                elif list_clean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        # anything left over in mf1 is gone in mf2
        removed = list(mf1.keys())

    # sort and return results:
    for l in modified, added, removed, deleted, unknown, ignored, clean:
        l.sort()
    return (modified, added, removed, deleted, unknown, ignored, clean)
def add(self, list, wlock=None):
    """Schedule the given files for addition ("a" state) at the next
    commit.  Missing files, non-regular files and files already tracked
    only produce a warning."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        target = self.wjoin(f)
        if not os.path.exists(target):
            self.ui.warn(_("%s does not exist!\n") % f)
        elif not os.path.isfile(target):
            self.ui.warn(_("%s not added: only files supported currently\n")
                         % f)
        elif self.dirstate.state(f) in 'an':
            self.ui.warn(_("%s already tracked!\n") % f)
        else:
            self.dirstate.update([f], "a")
def forget(self, list, wlock=None):
    """Un-schedule files that were pending addition; files whose state
    is not in 'ai' only get a warning."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) in 'ai':
            self.dirstate.forget([f])
        else:
            self.ui.warn(_("%s not added!\n") % f)
def remove(self, list, unlink=False, wlock=None):
    """Schedule files for removal ("r" state) at the next commit.

    With unlink=True the files are first deleted from the working
    directory (a file that is already gone is not an error).  Files
    still present on disk, pending-add files and untracked files are
    handled with a warning or a simple forget instead.
    """
    if unlink:
        for f in list:
            try:
                util.unlink(self.wjoin(f))
            except OSError as inst:
                # already missing is fine; anything else is real
                if inst.errno != errno.ENOENT:
                    raise
    if not wlock:
        wlock = self.wlock()
    for f in list:
        target = self.wjoin(f)
        if os.path.exists(target):
            self.ui.warn(_("%s still exists!\n") % f)
        elif self.dirstate.state(f) == 'a':
            # never committed: just drop the pending add
            self.dirstate.forget([f])
        elif f not in self.dirstate:
            self.ui.warn(_("%s not tracked!\n") % f)
        else:
            self.dirstate.update([f], "r")
def undelete(self, list, wlock=None):
    """Restore files scheduled for removal from the first dirstate
    parent: rewrite content and exec bit from the parent manifest and
    mark the file normal again.  Files not in the 'r' state only get a
    warning."""
    parent = self.dirstate.parents()[0]
    mnode = self.changelog.read(parent)[0]
    mf = self.manifest.read(mnode)
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) not in "r":
            self.ui.warn("%s not removed!\n" % f)
        else:
            data = self.file(f).read(mf[f])
            self.wwrite(f, data)
            util.set_exec(self.wjoin(f), mf.execf(f))
            self.dirstate.update([f], "n")
def copy(self, source, dest, wlock=None):
    """Mark dest as a copy of source in the dirstate.

    dest must already exist as a regular file in the working directory;
    otherwise only a warning is emitted.  An untracked dest is also
    scheduled for addition.
    """
    target = self.wjoin(dest)
    if not os.path.exists(target):
        self.ui.warn(_("%s does not exist!\n") % dest)
    elif not os.path.isfile(target):
        self.ui.warn(_("copy failed: %s is not a file\n") % dest)
    else:
        if not wlock:
            wlock = self.wlock()
        if self.dirstate.state(dest) == '?':
            self.dirstate.update([dest], "a")
        self.dirstate.copy(source, dest)
def heads(self, start=None):
    """Return the changelog heads (optionally limited to descendants of
    start), ordered by descending revision number."""
    found = self.changelog.heads(start)
    # decorate with the negated rev so a plain sort gives rev-descending
    decorated = [(-self.changelog.rev(h), h) for h in found]
    decorated.sort()
    return [node for _negrev, node in decorated]
# branchlookup returns a dict giving a list of branches for
# each head.  A branch is defined as the tag of a node or
# the branch of the node's parents.  If a node has multiple
# branch tags, tags are eliminated if they are visible from other
# branch tags.
#
# So, for this graph:  a->b->c->d->e
#                       \         /
#                         aa -----/
# a has tag 2.6.12
# d has tag 2.6.13
# e would have branch tags for 2.6.12 and 2.6.13.  Because the node
# for 2.6.12 can be reached from the node 2.6.13, that is eliminated
# from the list.
#
# It is possible that more than one head will have the same branch tag.
# callers need to check the result for multiple heads under the same
# branch tag if that is a problem for them (ie checkout of a specific
# branch).
#
# passing in a specific branch will limit the depth of the search
# through the parents.  It won't limit the branches returned in the
# result though.
def branchlookup(self, heads=None, branch=None):
    if not heads:
        heads = self.heads()
    headt = [h for h in heads]
    chlog = self.changelog
    branches = {}
    merges = []
    seenmerge = {}

    # traverse the tree once for each head, recording in the branches
    # dict which tags are visible from this head. The branches
    # dict also records which tags are visible from each tag
    # while we traverse.
    while headt or merges:
        if merges:
            n, found = merges.pop()
            visit = [n]
        else:
            h = headt.pop()
            visit = [h]
            found = [h]
        seen = {}
        while visit:
            n = visit.pop()
            if n in seen:
                continue
            pp = chlog.parents(n)
            tags = self.nodetags(n)
            if tags:
                for x in tags:
                    if x == 'tip':
                        continue
                    for f in found:
                        branches.setdefault(f, {})[n] = 1
                    branches.setdefault(n, {})[n] = 1
                    break
                if n not in found:
                    found.append(n)
                if branch in tags:
                    continue
            seen[n] = 1
            if pp[1] != nullid and n not in seenmerge:
                merges.append((pp[1], [x for x in found]))
                seenmerge[n] = 1
            if pp[0] != nullid:
                visit.append(pp[0])
    # traverse the branches dict, eliminating branch tags from each
    # head that are visible from another branch tag for that head.
    out = {}
    viscache = {}
    for h in heads:
        def visible(node):
            # set of tag nodes reachable from `node`, memoized
            if node in viscache:
                return viscache[node]
            ret = {}
            visit = [node]
            while visit:
                x = visit.pop()
                if x in viscache:
                    ret.update(viscache[x])
                elif x not in ret:
                    ret[x] = 1
                    if x in branches:
                        visit.extend(list(branches[x].keys()))
            viscache[node] = ret
            return ret
        if h not in branches:
            continue
        # O(n^2), but somewhat limited.  This only searches the
        # tags visible from a specific head, not all the tags in the
        # whole repo.
        for b in branches[h]:
            vis = False
            for bb in branches[h].keys():
                if b != bb:
                    if b in visible(bb):
                        vis = True
                        break
            if not vis:
                l = out.setdefault(h, [])
                l.extend(self.nodetags(b))
    return out
def branches(self, nodes):
    """For each starting node (default: the tip), follow first parents
    down its linear segment and report a 4-tuple
    (start, segment_end, p1, p2) where the segment ends at a merge or
    at the root."""
    if not nodes:
        nodes = [self.changelog.tip()]
    out = []
    for start in nodes:
        n = start
        while True:
            p = self.changelog.parents(n)
            if p[1] != nullid or p[0] == nullid:
                # merge or root: the linear run stops here
                out.append((start, n, p[0], p[1]))
                break
            n = p[0]
    return out
def between(self, pairs):
    """For each (top, bottom) pair, walk first parents from top down to
    bottom and return the nodes sampled at exponentially growing
    distances (1, 2, 4, ...) from top — one list per pair."""
    result = []
    for top, bottom in pairs:
        sample = []
        node, step, nextpick = top, 0, 1
        while node != bottom:
            parent = self.changelog.parents(node)[0]
            if step == nextpick:
                sample.append(node)
                nextpick *= 2
            node = parent
            step += 1
        result.append(sample)
    return result
1038 def findincoming(self, remote, base=None, heads=None, force=False):
1041 def findincoming(self, remote, base=None, heads=None, force=False):
1039 """Return list of roots of the subsets of missing nodes from remote
1042 """Return list of roots of the subsets of missing nodes from remote
1040
1043
1041 If base dict is specified, assume that these nodes and their parents
1044 If base dict is specified, assume that these nodes and their parents
1042 exist on the remote side and that no child of a node of base exists
1045 exist on the remote side and that no child of a node of base exists
1043 in both remote and self.
1046 in both remote and self.
1044 Furthermore base will be updated to include the nodes that exists
1047 Furthermore base will be updated to include the nodes that exists
1045 in self and remote but no children exists in self and remote.
1048 in self and remote but no children exists in self and remote.
1046 If a list of heads is specified, return only nodes which are heads
1049 If a list of heads is specified, return only nodes which are heads
1047 or ancestors of these heads.
1050 or ancestors of these heads.
1048
1051
1049 All the ancestors of base are in self and in remote.
1052 All the ancestors of base are in self and in remote.
1050 All the descendants of the list returned are missing in self.
1053 All the descendants of the list returned are missing in self.
1051 (and so we know that the rest of the nodes are missing in remote, see
1054 (and so we know that the rest of the nodes are missing in remote, see
1052 outgoing)
1055 outgoing)
1053 """
1056 """
1054 m = self.changelog.nodemap
1057 m = self.changelog.nodemap
1055 search = []
1058 search = []
1056 fetch = {}
1059 fetch = {}
1057 seen = {}
1060 seen = {}
1058 seenbranch = {}
1061 seenbranch = {}
1059 if base == None:
1062 if base == None:
1060 base = {}
1063 base = {}
1061
1064
1062 if not heads:
1065 if not heads:
1063 heads = remote.heads()
1066 heads = remote.heads()
1064
1067
1065 if self.changelog.tip() == nullid:
1068 if self.changelog.tip() == nullid:
1066 base[nullid] = 1
1069 base[nullid] = 1
1067 if heads != [nullid]:
1070 if heads != [nullid]:
1068 return [nullid]
1071 return [nullid]
1069 return []
1072 return []
1070
1073
1071 # assume we're closer to the tip than the root
1074 # assume we're closer to the tip than the root
1072 # and start by examining the heads
1075 # and start by examining the heads
1073 self.ui.status(_("searching for changes\n"))
1076 self.ui.status(_("searching for changes\n"))
1074
1077
1075 unknown = []
1078 unknown = []
1076 for h in heads:
1079 for h in heads:
1077 if h not in m:
1080 if h not in m:
1078 unknown.append(h)
1081 unknown.append(h)
1079 else:
1082 else:
1080 base[h] = 1
1083 base[h] = 1
1081
1084
1082 if not unknown:
1085 if not unknown:
1083 return []
1086 return []
1084
1087
1085 req = dict.fromkeys(unknown)
1088 req = dict.fromkeys(unknown)
1086 reqcnt = 0
1089 reqcnt = 0
1087
1090
1088 # search through remote branches
1091 # search through remote branches
1089 # a 'branch' here is a linear segment of history, with four parts:
1092 # a 'branch' here is a linear segment of history, with four parts:
1090 # head, root, first parent, second parent
1093 # head, root, first parent, second parent
1091 # (a branch always has two parents (or none) by definition)
1094 # (a branch always has two parents (or none) by definition)
1092 unknown = remote.branches(unknown)
1095 unknown = remote.branches(unknown)
1093 while unknown:
1096 while unknown:
1094 r = []
1097 r = []
1095 while unknown:
1098 while unknown:
1096 n = unknown.pop(0)
1099 n = unknown.pop(0)
1097 if n[0] in seen:
1100 if n[0] in seen:
1098 continue
1101 continue
1099
1102
1100 self.ui.debug(_("examining %s:%s\n")
1103 self.ui.debug(_("examining %s:%s\n")
1101 % (short(n[0]), short(n[1])))
1104 % (short(n[0]), short(n[1])))
1102 if n[0] == nullid: # found the end of the branch
1105 if n[0] == nullid: # found the end of the branch
1103 pass
1106 pass
1104 elif n in seenbranch:
1107 elif n in seenbranch:
1105 self.ui.debug(_("branch already found\n"))
1108 self.ui.debug(_("branch already found\n"))
1106 continue
1109 continue
1107 elif n[1] and n[1] in m: # do we know the base?
1110 elif n[1] and n[1] in m: # do we know the base?
1108 self.ui.debug(_("found incomplete branch %s:%s\n")
1111 self.ui.debug(_("found incomplete branch %s:%s\n")
1109 % (short(n[0]), short(n[1])))
1112 % (short(n[0]), short(n[1])))
1110 search.append(n) # schedule branch range for scanning
1113 search.append(n) # schedule branch range for scanning
1111 seenbranch[n] = 1
1114 seenbranch[n] = 1
1112 else:
1115 else:
1113 if n[1] not in seen and n[1] not in fetch:
1116 if n[1] not in seen and n[1] not in fetch:
1114 if n[2] in m and n[3] in m:
1117 if n[2] in m and n[3] in m:
1115 self.ui.debug(_("found new changeset %s\n") %
1118 self.ui.debug(_("found new changeset %s\n") %
1116 short(n[1]))
1119 short(n[1]))
1117 fetch[n[1]] = 1 # earliest unknown
1120 fetch[n[1]] = 1 # earliest unknown
1118 for p in n[2:4]:
1121 for p in n[2:4]:
1119 if p in m:
1122 if p in m:
1120 base[p] = 1 # latest known
1123 base[p] = 1 # latest known
1121
1124
1122 for p in n[2:4]:
1125 for p in n[2:4]:
1123 if p not in req and p not in m:
1126 if p not in req and p not in m:
1124 r.append(p)
1127 r.append(p)
1125 req[p] = 1
1128 req[p] = 1
1126 seen[n[0]] = 1
1129 seen[n[0]] = 1
1127
1130
1128 if r:
1131 if r:
1129 reqcnt += 1
1132 reqcnt += 1
1130 self.ui.debug(_("request %d: %s\n") %
1133 self.ui.debug(_("request %d: %s\n") %
1131 (reqcnt, " ".join(map(short, r))))
1134 (reqcnt, " ".join(map(short, r))))
1132 for p in range(0, len(r), 10):
1135 for p in range(0, len(r), 10):
1133 for b in remote.branches(r[p:p+10]):
1136 for b in remote.branches(r[p:p+10]):
1134 self.ui.debug(_("received %s:%s\n") %
1137 self.ui.debug(_("received %s:%s\n") %
1135 (short(b[0]), short(b[1])))
1138 (short(b[0]), short(b[1])))
1136 unknown.append(b)
1139 unknown.append(b)
1137
1140
1138 # do binary search on the branches we found
1141 # do binary search on the branches we found
1139 while search:
1142 while search:
1140 n = search.pop(0)
1143 n = search.pop(0)
1141 reqcnt += 1
1144 reqcnt += 1
1142 l = remote.between([(n[0], n[1])])[0]
1145 l = remote.between([(n[0], n[1])])[0]
1143 l.append(n[1])
1146 l.append(n[1])
1144 p = n[0]
1147 p = n[0]
1145 f = 1
1148 f = 1
1146 for i in l:
1149 for i in l:
1147 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1150 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1148 if i in m:
1151 if i in m:
1149 if f <= 2:
1152 if f <= 2:
1150 self.ui.debug(_("found new branch changeset %s\n") %
1153 self.ui.debug(_("found new branch changeset %s\n") %
1151 short(p))
1154 short(p))
1152 fetch[p] = 1
1155 fetch[p] = 1
1153 base[i] = 1
1156 base[i] = 1
1154 else:
1157 else:
1155 self.ui.debug(_("narrowed branch search to %s:%s\n")
1158 self.ui.debug(_("narrowed branch search to %s:%s\n")
1156 % (short(p), short(i)))
1159 % (short(p), short(i)))
1157 search.append((p, i))
1160 search.append((p, i))
1158 break
1161 break
1159 p, f = i, f * 2
1162 p, f = i, f * 2
1160
1163
1161 # sanity check our fetch list
1164 # sanity check our fetch list
1162 for f in fetch.keys():
1165 for f in fetch.keys():
1163 if f in m:
1166 if f in m:
1164 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1167 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1165
1168
1166 if base.keys() == [nullid]:
1169 if base.keys() == [nullid]:
1167 if force:
1170 if force:
1168 self.ui.warn(_("warning: repository is unrelated\n"))
1171 self.ui.warn(_("warning: repository is unrelated\n"))
1169 else:
1172 else:
1170 raise util.Abort(_("repository is unrelated"))
1173 raise util.Abort(_("repository is unrelated"))
1171
1174
1172 self.ui.debug(_("found new changesets starting at ") +
1175 self.ui.debug(_("found new changesets starting at ") +
1173 " ".join([short(f) for f in fetch]) + "\n")
1176 " ".join([short(f) for f in fetch]) + "\n")
1174
1177
1175 self.ui.debug(_("%d total queries\n") % reqcnt)
1178 self.ui.debug(_("%d total queries\n") % reqcnt)
1176
1179
1177 return fetch.keys()
1180 return fetch.keys()
1178
1181
1179 def findoutgoing(self, remote, base=None, heads=None, force=False):
1182 def findoutgoing(self, remote, base=None, heads=None, force=False):
1180 """Return list of nodes that are roots of subsets not in remote
1183 """Return list of nodes that are roots of subsets not in remote
1181
1184
1182 If base dict is specified, assume that these nodes and their parents
1185 If base dict is specified, assume that these nodes and their parents
1183 exist on the remote side.
1186 exist on the remote side.
1184 If a list of heads is specified, return only nodes which are heads
1187 If a list of heads is specified, return only nodes which are heads
1185 or ancestors of these heads, and return a second element which
1188 or ancestors of these heads, and return a second element which
1186 contains all remote heads which get new children.
1189 contains all remote heads which get new children.
1187 """
1190 """
1188 if base == None:
1191 if base == None:
1189 base = {}
1192 base = {}
1190 self.findincoming(remote, base, heads, force=force)
1193 self.findincoming(remote, base, heads, force=force)
1191
1194
1192 self.ui.debug(_("common changesets up to ")
1195 self.ui.debug(_("common changesets up to ")
1193 + " ".join(map(short, base.keys())) + "\n")
1196 + " ".join(map(short, base.keys())) + "\n")
1194
1197
1195 remain = dict.fromkeys(self.changelog.nodemap)
1198 remain = dict.fromkeys(self.changelog.nodemap)
1196
1199
1197 # prune everything remote has from the tree
1200 # prune everything remote has from the tree
1198 del remain[nullid]
1201 del remain[nullid]
1199 remove = base.keys()
1202 remove = base.keys()
1200 while remove:
1203 while remove:
1201 n = remove.pop(0)
1204 n = remove.pop(0)
1202 if n in remain:
1205 if n in remain:
1203 del remain[n]
1206 del remain[n]
1204 for p in self.changelog.parents(n):
1207 for p in self.changelog.parents(n):
1205 remove.append(p)
1208 remove.append(p)
1206
1209
1207 # find every node whose parents have been pruned
1210 # find every node whose parents have been pruned
1208 subset = []
1211 subset = []
1209 # find every remote head that will get new children
1212 # find every remote head that will get new children
1210 updated_heads = {}
1213 updated_heads = {}
1211 for n in remain:
1214 for n in remain:
1212 p1, p2 = self.changelog.parents(n)
1215 p1, p2 = self.changelog.parents(n)
1213 if p1 not in remain and p2 not in remain:
1216 if p1 not in remain and p2 not in remain:
1214 subset.append(n)
1217 subset.append(n)
1215 if heads:
1218 if heads:
1216 if p1 in heads:
1219 if p1 in heads:
1217 updated_heads[p1] = True
1220 updated_heads[p1] = True
1218 if p2 in heads:
1221 if p2 in heads:
1219 updated_heads[p2] = True
1222 updated_heads[p2] = True
1220
1223
1221 # this is the set of all roots we have to push
1224 # this is the set of all roots we have to push
1222 if heads:
1225 if heads:
1223 return subset, updated_heads.keys()
1226 return subset, updated_heads.keys()
1224 else:
1227 else:
1225 return subset
1228 return subset
1226
1229
1227 def pull(self, remote, heads=None, force=False, lock=None):
1230 def pull(self, remote, heads=None, force=False, lock=None):
1228 mylock = False
1231 mylock = False
1229 if not lock:
1232 if not lock:
1230 lock = self.lock()
1233 lock = self.lock()
1231 mylock = True
1234 mylock = True
1232
1235
1233 try:
1236 try:
1234 fetch = self.findincoming(remote, force=force)
1237 fetch = self.findincoming(remote, force=force)
1235 if fetch == [nullid]:
1238 if fetch == [nullid]:
1236 self.ui.status(_("requesting all changes\n"))
1239 self.ui.status(_("requesting all changes\n"))
1237
1240
1238 if not fetch:
1241 if not fetch:
1239 self.ui.status(_("no changes found\n"))
1242 self.ui.status(_("no changes found\n"))
1240 return 0
1243 return 0
1241
1244
1242 if heads is None:
1245 if heads is None:
1243 cg = remote.changegroup(fetch, 'pull')
1246 cg = remote.changegroup(fetch, 'pull')
1244 else:
1247 else:
1245 if 'changegroupsubset' not in remote.capabilities:
1248 if 'changegroupsubset' not in remote.capabilities:
1246 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1249 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1247 cg = remote.changegroupsubset(fetch, heads, 'pull')
1250 cg = remote.changegroupsubset(fetch, heads, 'pull')
1248 return self.addchangegroup(cg, 'pull', remote.url())
1251 return self.addchangegroup(cg, 'pull', remote.url())
1249 finally:
1252 finally:
1250 if mylock:
1253 if mylock:
1251 lock.release()
1254 lock.release()
1252
1255
1253 def push(self, remote, force=False, revs=None):
1256 def push(self, remote, force=False, revs=None):
1254 # there are two ways to push to remote repo:
1257 # there are two ways to push to remote repo:
1255 #
1258 #
1256 # addchangegroup assumes local user can lock remote
1259 # addchangegroup assumes local user can lock remote
1257 # repo (local filesystem, old ssh servers).
1260 # repo (local filesystem, old ssh servers).
1258 #
1261 #
1259 # unbundle assumes local user cannot lock remote repo (new ssh
1262 # unbundle assumes local user cannot lock remote repo (new ssh
1260 # servers, http servers).
1263 # servers, http servers).
1261
1264
1262 if remote.capable('unbundle'):
1265 if remote.capable('unbundle'):
1263 return self.push_unbundle(remote, force, revs)
1266 return self.push_unbundle(remote, force, revs)
1264 return self.push_addchangegroup(remote, force, revs)
1267 return self.push_addchangegroup(remote, force, revs)
1265
1268
1266 def prepush(self, remote, force, revs):
1269 def prepush(self, remote, force, revs):
1267 base = {}
1270 base = {}
1268 remote_heads = remote.heads()
1271 remote_heads = remote.heads()
1269 inc = self.findincoming(remote, base, remote_heads, force=force)
1272 inc = self.findincoming(remote, base, remote_heads, force=force)
1270 if not force and inc:
1273 if not force and inc:
1271 self.ui.warn(_("abort: unsynced remote changes!\n"))
1274 self.ui.warn(_("abort: unsynced remote changes!\n"))
1272 self.ui.status(_("(did you forget to sync?"
1275 self.ui.status(_("(did you forget to sync?"
1273 " use push -f to force)\n"))
1276 " use push -f to force)\n"))
1274 return None, 1
1277 return None, 1
1275
1278
1276 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1279 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1277 if revs is not None:
1280 if revs is not None:
1278 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1281 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1279 else:
1282 else:
1280 bases, heads = update, self.changelog.heads()
1283 bases, heads = update, self.changelog.heads()
1281
1284
1282 if not bases:
1285 if not bases:
1283 self.ui.status(_("no changes found\n"))
1286 self.ui.status(_("no changes found\n"))
1284 return None, 1
1287 return None, 1
1285 elif not force:
1288 elif not force:
1286 # FIXME we don't properly detect creation of new heads
1289 # FIXME we don't properly detect creation of new heads
1287 # in the push -r case, assume the user knows what he's doing
1290 # in the push -r case, assume the user knows what he's doing
1288 if not revs and len(remote_heads) < len(heads) \
1291 if not revs and len(remote_heads) < len(heads) \
1289 and remote_heads != [nullid]:
1292 and remote_heads != [nullid]:
1290 self.ui.warn(_("abort: push creates new remote branches!\n"))
1293 self.ui.warn(_("abort: push creates new remote branches!\n"))
1291 self.ui.status(_("(did you forget to merge?"
1294 self.ui.status(_("(did you forget to merge?"
1292 " use push -f to force)\n"))
1295 " use push -f to force)\n"))
1293 return None, 1
1296 return None, 1
1294
1297
1295 if revs is None:
1298 if revs is None:
1296 cg = self.changegroup(update, 'push')
1299 cg = self.changegroup(update, 'push')
1297 else:
1300 else:
1298 cg = self.changegroupsubset(update, revs, 'push')
1301 cg = self.changegroupsubset(update, revs, 'push')
1299 return cg, remote_heads
1302 return cg, remote_heads
1300
1303
1301 def push_addchangegroup(self, remote, force, revs):
1304 def push_addchangegroup(self, remote, force, revs):
1302 lock = remote.lock()
1305 lock = remote.lock()
1303
1306
1304 ret = self.prepush(remote, force, revs)
1307 ret = self.prepush(remote, force, revs)
1305 if ret[0] is not None:
1308 if ret[0] is not None:
1306 cg, remote_heads = ret
1309 cg, remote_heads = ret
1307 return remote.addchangegroup(cg, 'push', self.url())
1310 return remote.addchangegroup(cg, 'push', self.url())
1308 return ret[1]
1311 return ret[1]
1309
1312
1310 def push_unbundle(self, remote, force, revs):
1313 def push_unbundle(self, remote, force, revs):
1311 # local repo finds heads on server, finds out what revs it
1314 # local repo finds heads on server, finds out what revs it
1312 # must push. once revs transferred, if server finds it has
1315 # must push. once revs transferred, if server finds it has
1313 # different heads (someone else won commit/push race), server
1316 # different heads (someone else won commit/push race), server
1314 # aborts.
1317 # aborts.
1315
1318
1316 ret = self.prepush(remote, force, revs)
1319 ret = self.prepush(remote, force, revs)
1317 if ret[0] is not None:
1320 if ret[0] is not None:
1318 cg, remote_heads = ret
1321 cg, remote_heads = ret
1319 if force: remote_heads = ['force']
1322 if force: remote_heads = ['force']
1320 return remote.unbundle(cg, remote_heads, 'push')
1323 return remote.unbundle(cg, remote_heads, 'push')
1321 return ret[1]
1324 return ret[1]
1322
1325
1323 def changegroupsubset(self, bases, heads, source):
1326 def changegroupsubset(self, bases, heads, source):
1324 """This function generates a changegroup consisting of all the nodes
1327 """This function generates a changegroup consisting of all the nodes
1325 that are descendents of any of the bases, and ancestors of any of
1328 that are descendents of any of the bases, and ancestors of any of
1326 the heads.
1329 the heads.
1327
1330
1328 It is fairly complex as determining which filenodes and which
1331 It is fairly complex as determining which filenodes and which
1329 manifest nodes need to be included for the changeset to be complete
1332 manifest nodes need to be included for the changeset to be complete
1330 is non-trivial.
1333 is non-trivial.
1331
1334
1332 Another wrinkle is doing the reverse, figuring out which changeset in
1335 Another wrinkle is doing the reverse, figuring out which changeset in
1333 the changegroup a particular filenode or manifestnode belongs to."""
1336 the changegroup a particular filenode or manifestnode belongs to."""
1334
1337
1335 self.hook('preoutgoing', throw=True, source=source)
1338 self.hook('preoutgoing', throw=True, source=source)
1336
1339
1337 # Set up some initial variables
1340 # Set up some initial variables
1338 # Make it easy to refer to self.changelog
1341 # Make it easy to refer to self.changelog
1339 cl = self.changelog
1342 cl = self.changelog
1340 # msng is short for missing - compute the list of changesets in this
1343 # msng is short for missing - compute the list of changesets in this
1341 # changegroup.
1344 # changegroup.
1342 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1345 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1343 # Some bases may turn out to be superfluous, and some heads may be
1346 # Some bases may turn out to be superfluous, and some heads may be
1344 # too. nodesbetween will return the minimal set of bases and heads
1347 # too. nodesbetween will return the minimal set of bases and heads
1345 # necessary to re-create the changegroup.
1348 # necessary to re-create the changegroup.
1346
1349
1347 # Known heads are the list of heads that it is assumed the recipient
1350 # Known heads are the list of heads that it is assumed the recipient
1348 # of this changegroup will know about.
1351 # of this changegroup will know about.
1349 knownheads = {}
1352 knownheads = {}
1350 # We assume that all parents of bases are known heads.
1353 # We assume that all parents of bases are known heads.
1351 for n in bases:
1354 for n in bases:
1352 for p in cl.parents(n):
1355 for p in cl.parents(n):
1353 if p != nullid:
1356 if p != nullid:
1354 knownheads[p] = 1
1357 knownheads[p] = 1
1355 knownheads = knownheads.keys()
1358 knownheads = knownheads.keys()
1356 if knownheads:
1359 if knownheads:
1357 # Now that we know what heads are known, we can compute which
1360 # Now that we know what heads are known, we can compute which
1358 # changesets are known. The recipient must know about all
1361 # changesets are known. The recipient must know about all
1359 # changesets required to reach the known heads from the null
1362 # changesets required to reach the known heads from the null
1360 # changeset.
1363 # changeset.
1361 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1364 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1362 junk = None
1365 junk = None
1363 # Transform the list into an ersatz set.
1366 # Transform the list into an ersatz set.
1364 has_cl_set = dict.fromkeys(has_cl_set)
1367 has_cl_set = dict.fromkeys(has_cl_set)
1365 else:
1368 else:
1366 # If there were no known heads, the recipient cannot be assumed to
1369 # If there were no known heads, the recipient cannot be assumed to
1367 # know about any changesets.
1370 # know about any changesets.
1368 has_cl_set = {}
1371 has_cl_set = {}
1369
1372
1370 # Make it easy to refer to self.manifest
1373 # Make it easy to refer to self.manifest
1371 mnfst = self.manifest
1374 mnfst = self.manifest
1372 # We don't know which manifests are missing yet
1375 # We don't know which manifests are missing yet
1373 msng_mnfst_set = {}
1376 msng_mnfst_set = {}
1374 # Nor do we know which filenodes are missing.
1377 # Nor do we know which filenodes are missing.
1375 msng_filenode_set = {}
1378 msng_filenode_set = {}
1376
1379
1377 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1380 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1378 junk = None
1381 junk = None
1379
1382
1380 # A changeset always belongs to itself, so the changenode lookup
1383 # A changeset always belongs to itself, so the changenode lookup
1381 # function for a changenode is identity.
1384 # function for a changenode is identity.
1382 def identity(x):
1385 def identity(x):
1383 return x
1386 return x
1384
1387
1385 # A function generating function. Sets up an environment for the
1388 # A function generating function. Sets up an environment for the
1386 # inner function.
1389 # inner function.
1387 def cmp_by_rev_func(revlog):
1390 def cmp_by_rev_func(revlog):
1388 # Compare two nodes by their revision number in the environment's
1391 # Compare two nodes by their revision number in the environment's
1389 # revision history. Since the revision number both represents the
1392 # revision history. Since the revision number both represents the
1390 # most efficient order to read the nodes in, and represents a
1393 # most efficient order to read the nodes in, and represents a
1391 # topological sorting of the nodes, this function is often useful.
1394 # topological sorting of the nodes, this function is often useful.
1392 def cmp_by_rev(a, b):
1395 def cmp_by_rev(a, b):
1393 return cmp(revlog.rev(a), revlog.rev(b))
1396 return cmp(revlog.rev(a), revlog.rev(b))
1394 return cmp_by_rev
1397 return cmp_by_rev
1395
1398
1396 # If we determine that a particular file or manifest node must be a
1399 # If we determine that a particular file or manifest node must be a
1397 # node that the recipient of the changegroup will already have, we can
1400 # node that the recipient of the changegroup will already have, we can
1398 # also assume the recipient will have all the parents. This function
1401 # also assume the recipient will have all the parents. This function
1399 # prunes them from the set of missing nodes.
1402 # prunes them from the set of missing nodes.
1400 def prune_parents(revlog, hasset, msngset):
1403 def prune_parents(revlog, hasset, msngset):
1401 haslst = hasset.keys()
1404 haslst = hasset.keys()
1402 haslst.sort(cmp_by_rev_func(revlog))
1405 haslst.sort(cmp_by_rev_func(revlog))
1403 for node in haslst:
1406 for node in haslst:
1404 parentlst = [p for p in revlog.parents(node) if p != nullid]
1407 parentlst = [p for p in revlog.parents(node) if p != nullid]
1405 while parentlst:
1408 while parentlst:
1406 n = parentlst.pop()
1409 n = parentlst.pop()
1407 if n not in hasset:
1410 if n not in hasset:
1408 hasset[n] = 1
1411 hasset[n] = 1
1409 p = [p for p in revlog.parents(n) if p != nullid]
1412 p = [p for p in revlog.parents(n) if p != nullid]
1410 parentlst.extend(p)
1413 parentlst.extend(p)
1411 for n in hasset:
1414 for n in hasset:
1412 msngset.pop(n, None)
1415 msngset.pop(n, None)
1413
1416
1414 # This is a function generating function used to set up an environment
1417 # This is a function generating function used to set up an environment
1415 # for the inner function to execute in.
1418 # for the inner function to execute in.
1416 def manifest_and_file_collector(changedfileset):
1419 def manifest_and_file_collector(changedfileset):
1417 # This is an information gathering function that gathers
1420 # This is an information gathering function that gathers
1418 # information from each changeset node that goes out as part of
1421 # information from each changeset node that goes out as part of
1419 # the changegroup. The information gathered is a list of which
1422 # the changegroup. The information gathered is a list of which
1420 # manifest nodes are potentially required (the recipient may
1423 # manifest nodes are potentially required (the recipient may
1421 # already have them) and total list of all files which were
1424 # already have them) and total list of all files which were
1422 # changed in any changeset in the changegroup.
1425 # changed in any changeset in the changegroup.
1423 #
1426 #
1424 # We also remember the first changenode we saw any manifest
1427 # We also remember the first changenode we saw any manifest
1425 # referenced by so we can later determine which changenode 'owns'
1428 # referenced by so we can later determine which changenode 'owns'
1426 # the manifest.
1429 # the manifest.
1427 def collect_manifests_and_files(clnode):
1430 def collect_manifests_and_files(clnode):
1428 c = cl.read(clnode)
1431 c = cl.read(clnode)
1429 for f in c[3]:
1432 for f in c[3]:
1430 # This is to make sure we only have one instance of each
1433 # This is to make sure we only have one instance of each
1431 # filename string for each filename.
1434 # filename string for each filename.
1432 changedfileset.setdefault(f, f)
1435 changedfileset.setdefault(f, f)
1433 msng_mnfst_set.setdefault(c[0], clnode)
1436 msng_mnfst_set.setdefault(c[0], clnode)
1434 return collect_manifests_and_files
1437 return collect_manifests_and_files
1435
1438
1436 # Figure out which manifest nodes (of the ones we think might be part
1439 # Figure out which manifest nodes (of the ones we think might be part
1437 # of the changegroup) the recipient must know about and remove them
1440 # of the changegroup) the recipient must know about and remove them
1438 # from the changegroup.
1441 # from the changegroup.
1439 def prune_manifests():
1442 def prune_manifests():
1440 has_mnfst_set = {}
1443 has_mnfst_set = {}
1441 for n in msng_mnfst_set:
1444 for n in msng_mnfst_set:
1442 # If a 'missing' manifest thinks it belongs to a changenode
1445 # If a 'missing' manifest thinks it belongs to a changenode
1443 # the recipient is assumed to have, obviously the recipient
1446 # the recipient is assumed to have, obviously the recipient
1444 # must have that manifest.
1447 # must have that manifest.
1445 linknode = cl.node(mnfst.linkrev(n))
1448 linknode = cl.node(mnfst.linkrev(n))
1446 if linknode in has_cl_set:
1449 if linknode in has_cl_set:
1447 has_mnfst_set[n] = 1
1450 has_mnfst_set[n] = 1
1448 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1451 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1449
1452
1450 # Use the information collected in collect_manifests_and_files to say
1453 # Use the information collected in collect_manifests_and_files to say
1451 # which changenode any manifestnode belongs to.
1454 # which changenode any manifestnode belongs to.
1452 def lookup_manifest_link(mnfstnode):
1455 def lookup_manifest_link(mnfstnode):
1453 return msng_mnfst_set[mnfstnode]
1456 return msng_mnfst_set[mnfstnode]
1454
1457
1455 # A function generating function that sets up the initial environment
1458 # A function generating function that sets up the initial environment
1456 # the inner function.
1459 # the inner function.
1457 def filenode_collector(changedfiles):
1460 def filenode_collector(changedfiles):
1458 next_rev = [0]
1461 next_rev = [0]
1459 # This gathers information from each manifestnode included in the
1462 # This gathers information from each manifestnode included in the
1460 # changegroup about which filenodes the manifest node references
1463 # changegroup about which filenodes the manifest node references
1461 # so we can include those in the changegroup too.
1464 # so we can include those in the changegroup too.
1462 #
1465 #
1463 # It also remembers which changenode each filenode belongs to. It
1466 # It also remembers which changenode each filenode belongs to. It
1464 # does this by assuming the a filenode belongs to the changenode
1467 # does this by assuming the a filenode belongs to the changenode
1465 # the first manifest that references it belongs to.
1468 # the first manifest that references it belongs to.
1466 def collect_msng_filenodes(mnfstnode):
1469 def collect_msng_filenodes(mnfstnode):
1467 r = mnfst.rev(mnfstnode)
1470 r = mnfst.rev(mnfstnode)
1468 if r == next_rev[0]:
1471 if r == next_rev[0]:
1469 # If the last rev we looked at was the one just previous,
1472 # If the last rev we looked at was the one just previous,
1470 # we only need to see a diff.
1473 # we only need to see a diff.
1471 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1474 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1472 # For each line in the delta
1475 # For each line in the delta
1473 for dline in delta.splitlines():
1476 for dline in delta.splitlines():
1474 # get the filename and filenode for that line
1477 # get the filename and filenode for that line
1475 f, fnode = dline.split('\0')
1478 f, fnode = dline.split('\0')
1476 fnode = bin(fnode[:40])
1479 fnode = bin(fnode[:40])
1477 f = changedfiles.get(f, None)
1480 f = changedfiles.get(f, None)
1478 # And if the file is in the list of files we care
1481 # And if the file is in the list of files we care
1479 # about.
1482 # about.
1480 if f is not None:
1483 if f is not None:
1481 # Get the changenode this manifest belongs to
1484 # Get the changenode this manifest belongs to
1482 clnode = msng_mnfst_set[mnfstnode]
1485 clnode = msng_mnfst_set[mnfstnode]
1483 # Create the set of filenodes for the file if
1486 # Create the set of filenodes for the file if
1484 # there isn't one already.
1487 # there isn't one already.
1485 ndset = msng_filenode_set.setdefault(f, {})
1488 ndset = msng_filenode_set.setdefault(f, {})
1486 # And set the filenode's changelog node to the
1489 # And set the filenode's changelog node to the
1487 # manifest's if it hasn't been set already.
1490 # manifest's if it hasn't been set already.
1488 ndset.setdefault(fnode, clnode)
1491 ndset.setdefault(fnode, clnode)
1489 else:
1492 else:
1490 # Otherwise we need a full manifest.
1493 # Otherwise we need a full manifest.
1491 m = mnfst.read(mnfstnode)
1494 m = mnfst.read(mnfstnode)
1492 # For every file in we care about.
1495 # For every file in we care about.
1493 for f in changedfiles:
1496 for f in changedfiles:
1494 fnode = m.get(f, None)
1497 fnode = m.get(f, None)
1495 # If it's in the manifest
1498 # If it's in the manifest
1496 if fnode is not None:
1499 if fnode is not None:
1497 # See comments above.
1500 # See comments above.
1498 clnode = msng_mnfst_set[mnfstnode]
1501 clnode = msng_mnfst_set[mnfstnode]
1499 ndset = msng_filenode_set.setdefault(f, {})
1502 ndset = msng_filenode_set.setdefault(f, {})
1500 ndset.setdefault(fnode, clnode)
1503 ndset.setdefault(fnode, clnode)
1501 # Remember the revision we hope to see next.
1504 # Remember the revision we hope to see next.
1502 next_rev[0] = r + 1
1505 next_rev[0] = r + 1
1503 return collect_msng_filenodes
1506 return collect_msng_filenodes
1504
1507
1505 # We have a list of filenodes we think we need for a file, lets remove
1508 # We have a list of filenodes we think we need for a file, lets remove
1506 # all those we now the recipient must have.
1509 # all those we now the recipient must have.
1507 def prune_filenodes(f, filerevlog):
1510 def prune_filenodes(f, filerevlog):
1508 msngset = msng_filenode_set[f]
1511 msngset = msng_filenode_set[f]
1509 hasset = {}
1512 hasset = {}
1510 # If a 'missing' filenode thinks it belongs to a changenode we
1513 # If a 'missing' filenode thinks it belongs to a changenode we
1511 # assume the recipient must have, then the recipient must have
1514 # assume the recipient must have, then the recipient must have
1512 # that filenode.
1515 # that filenode.
1513 for n in msngset:
1516 for n in msngset:
1514 clnode = cl.node(filerevlog.linkrev(n))
1517 clnode = cl.node(filerevlog.linkrev(n))
1515 if clnode in has_cl_set:
1518 if clnode in has_cl_set:
1516 hasset[n] = 1
1519 hasset[n] = 1
1517 prune_parents(filerevlog, hasset, msngset)
1520 prune_parents(filerevlog, hasset, msngset)
1518
1521
1519 # A function generator function that sets up the a context for the
1522 # A function generator function that sets up the a context for the
1520 # inner function.
1523 # inner function.
1521 def lookup_filenode_link_func(fname):
1524 def lookup_filenode_link_func(fname):
1522 msngset = msng_filenode_set[fname]
1525 msngset = msng_filenode_set[fname]
1523 # Lookup the changenode the filenode belongs to.
1526 # Lookup the changenode the filenode belongs to.
1524 def lookup_filenode_link(fnode):
1527 def lookup_filenode_link(fnode):
1525 return msngset[fnode]
1528 return msngset[fnode]
1526 return lookup_filenode_link
1529 return lookup_filenode_link
1527
1530
1528 # Now that we have all theses utility functions to help out and
1531 # Now that we have all theses utility functions to help out and
1529 # logically divide up the task, generate the group.
1532 # logically divide up the task, generate the group.
1530 def gengroup():
1533 def gengroup():
1531 # The set of changed files starts empty.
1534 # The set of changed files starts empty.
1532 changedfiles = {}
1535 changedfiles = {}
1533 # Create a changenode group generator that will call our functions
1536 # Create a changenode group generator that will call our functions
1534 # back to lookup the owning changenode and collect information.
1537 # back to lookup the owning changenode and collect information.
1535 group = cl.group(msng_cl_lst, identity,
1538 group = cl.group(msng_cl_lst, identity,
1536 manifest_and_file_collector(changedfiles))
1539 manifest_and_file_collector(changedfiles))
1537 for chnk in group:
1540 for chnk in group:
1538 yield chnk
1541 yield chnk
1539
1542
1540 # The list of manifests has been collected by the generator
1543 # The list of manifests has been collected by the generator
1541 # calling our functions back.
1544 # calling our functions back.
1542 prune_manifests()
1545 prune_manifests()
1543 msng_mnfst_lst = msng_mnfst_set.keys()
1546 msng_mnfst_lst = msng_mnfst_set.keys()
1544 # Sort the manifestnodes by revision number.
1547 # Sort the manifestnodes by revision number.
1545 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1548 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1546 # Create a generator for the manifestnodes that calls our lookup
1549 # Create a generator for the manifestnodes that calls our lookup
1547 # and data collection functions back.
1550 # and data collection functions back.
1548 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1551 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1549 filenode_collector(changedfiles))
1552 filenode_collector(changedfiles))
1550 for chnk in group:
1553 for chnk in group:
1551 yield chnk
1554 yield chnk
1552
1555
1553 # These are no longer needed, dereference and toss the memory for
1556 # These are no longer needed, dereference and toss the memory for
1554 # them.
1557 # them.
1555 msng_mnfst_lst = None
1558 msng_mnfst_lst = None
1556 msng_mnfst_set.clear()
1559 msng_mnfst_set.clear()
1557
1560
1558 changedfiles = changedfiles.keys()
1561 changedfiles = changedfiles.keys()
1559 changedfiles.sort()
1562 changedfiles.sort()
1560 # Go through all our files in order sorted by name.
1563 # Go through all our files in order sorted by name.
1561 for fname in changedfiles:
1564 for fname in changedfiles:
1562 filerevlog = self.file(fname)
1565 filerevlog = self.file(fname)
1563 # Toss out the filenodes that the recipient isn't really
1566 # Toss out the filenodes that the recipient isn't really
1564 # missing.
1567 # missing.
1565 if msng_filenode_set.has_key(fname):
1568 if msng_filenode_set.has_key(fname):
1566 prune_filenodes(fname, filerevlog)
1569 prune_filenodes(fname, filerevlog)
1567 msng_filenode_lst = msng_filenode_set[fname].keys()
1570 msng_filenode_lst = msng_filenode_set[fname].keys()
1568 else:
1571 else:
1569 msng_filenode_lst = []
1572 msng_filenode_lst = []
1570 # If any filenodes are left, generate the group for them,
1573 # If any filenodes are left, generate the group for them,
1571 # otherwise don't bother.
1574 # otherwise don't bother.
1572 if len(msng_filenode_lst) > 0:
1575 if len(msng_filenode_lst) > 0:
1573 yield changegroup.genchunk(fname)
1576 yield changegroup.genchunk(fname)
1574 # Sort the filenodes by their revision #
1577 # Sort the filenodes by their revision #
1575 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1578 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1576 # Create a group generator and only pass in a changenode
1579 # Create a group generator and only pass in a changenode
1577 # lookup function as we need to collect no information
1580 # lookup function as we need to collect no information
1578 # from filenodes.
1581 # from filenodes.
1579 group = filerevlog.group(msng_filenode_lst,
1582 group = filerevlog.group(msng_filenode_lst,
1580 lookup_filenode_link_func(fname))
1583 lookup_filenode_link_func(fname))
1581 for chnk in group:
1584 for chnk in group:
1582 yield chnk
1585 yield chnk
1583 if msng_filenode_set.has_key(fname):
1586 if msng_filenode_set.has_key(fname):
1584 # Don't need this anymore, toss it to free memory.
1587 # Don't need this anymore, toss it to free memory.
1585 del msng_filenode_set[fname]
1588 del msng_filenode_set[fname]
1586 # Signal that no more groups are left.
1589 # Signal that no more groups are left.
1587 yield changegroup.closechunk()
1590 yield changegroup.closechunk()
1588
1591
1589 if msng_cl_lst:
1592 if msng_cl_lst:
1590 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1593 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1591
1594
1592 return util.chunkbuffer(gengroup())
1595 return util.chunkbuffer(gengroup())
1593
1596
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        # Give hooks a chance to veto the operation before any work is done.
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # Every changeset descending from basenodes is outgoing.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Set of outgoing changelog revision numbers, used below to filter
        # manifest and filelog nodes by their linkrev.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # Changelog nodes are their own link nodes.
            return x

        def gennodelst(revlog):
            # Yield the nodes of revlog whose linked changeset is outgoing.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Build a callback that records into changedfileset every file
            # name touched by a changeset as its chunk is generated.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:  # c[3]: the changeset's changed-file list
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Build a function mapping a node of revlog to the changelog
            # node it was introduced by.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # Changesets first; the collector fills changedfiles as a side
            # effect while the chunks stream out.
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # Then the manifest nodes linked to outgoing changesets.
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # Finally one group per changed file, in sorted name order.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # A file group is the file-name chunk followed by that
                    # file's revision chunks; files with nothing outgoing
                    # are omitted entirely.
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # An empty chunk signals the end of the changegroup.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1659
1662
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            # Progress callback for changelog.addgroup; 'cl' is the
            # append-mode changelog bound later in this method (closure).
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map a changelog node to its revision number, used as the
            # linkrev for incoming manifest and file revisions.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last changelog rev before/after adding the group.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # Stream format: a file-name chunk, then that file's
                # revision group; an empty chunk terminates the stream.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # Flush the append-mode changelog to its real files only once
            # everything has been added successfully.
            cl.writedata()
        finally:
            # Always discard the temporary append files, even on error.
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # Run before the transaction commits so a failing hook rolls
            # the whole group back.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # One 'incoming' hook invocation per new changeset.
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1755
1758
1756
1759
1757 def stream_in(self, remote):
1760 def stream_in(self, remote):
1758 fp = remote.stream_out()
1761 fp = remote.stream_out()
1759 resp = int(fp.readline())
1762 resp = int(fp.readline())
1760 if resp != 0:
1763 if resp != 0:
1761 raise util.Abort(_('operation forbidden by server'))
1764 raise util.Abort(_('operation forbidden by server'))
1762 self.ui.status(_('streaming all changes\n'))
1765 self.ui.status(_('streaming all changes\n'))
1763 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1766 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1764 self.ui.status(_('%d files to transfer, %s of data\n') %
1767 self.ui.status(_('%d files to transfer, %s of data\n') %
1765 (total_files, util.bytecount(total_bytes)))
1768 (total_files, util.bytecount(total_bytes)))
1766 start = time.time()
1769 start = time.time()
1767 for i in xrange(total_files):
1770 for i in xrange(total_files):
1768 name, size = fp.readline().split('\0', 1)
1771 name, size = fp.readline().split('\0', 1)
1769 size = int(size)
1772 size = int(size)
1770 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1773 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1771 ofp = self.opener(name, 'w')
1774 ofp = self.opener(name, 'w')
1772 for chunk in util.filechunkiter(fp, limit=size):
1775 for chunk in util.filechunkiter(fp, limit=size):
1773 ofp.write(chunk)
1776 ofp.write(chunk)
1774 ofp.close()
1777 ofp.close()
1775 elapsed = time.time() - start
1778 elapsed = time.time() - start
1776 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1779 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1777 (util.bytecount(total_bytes), elapsed,
1780 (util.bytecount(total_bytes), elapsed,
1778 util.bytecount(total_bytes / elapsed)))
1781 util.bytecount(total_bytes / elapsed)))
1779 self.reload()
1782 self.reload()
1780 return len(self.heads()) + 1
1783 return len(self.heads()) + 1
1781
1784
1782 def clone(self, remote, heads=[], stream=False):
1785 def clone(self, remote, heads=[], stream=False):
1783 '''clone remote repository.
1786 '''clone remote repository.
1784
1787
1785 keyword arguments:
1788 keyword arguments:
1786 heads: list of revs to clone (forces use of pull)
1789 heads: list of revs to clone (forces use of pull)
1787 stream: use streaming clone if possible'''
1790 stream: use streaming clone if possible'''
1788
1791
1789 # now, all clients that can request uncompressed clones can
1792 # now, all clients that can request uncompressed clones can
1790 # read repo formats supported by all servers that can serve
1793 # read repo formats supported by all servers that can serve
1791 # them.
1794 # them.
1792
1795
1793 # if revlog format changes, client will have to check version
1796 # if revlog format changes, client will have to check version
1794 # and format flags on "stream" capability, and use
1797 # and format flags on "stream" capability, and use
1795 # uncompressed only if compatible.
1798 # uncompressed only if compatible.
1796
1799
1797 if stream and not heads and remote.capable('stream'):
1800 if stream and not heads and remote.capable('stream'):
1798 return self.stream_in(remote)
1801 return self.stream_in(remote)
1799 return self.pull(remote, heads)
1802 return self.pull(remote, heads)
1800
1803
1801 # used to avoid circular references so destructors work
1804 # used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the journal files under *base*
    to their undo names after a transaction completes.

    Capturing only the path string (not the repo object) avoids the
    circular references that would keep destructors from running.
    """
    journal_dir = base

    def a():
        join = os.path.join
        util.rename(join(journal_dir, "journal"),
                    join(journal_dir, "undo"))
        util.rename(join(journal_dir, "journal.dirstate"),
                    join(journal_dir, "undo.dirstate"))
    return a
1809
1812
def instance(ui, path, create):
    # Repository factory entry point: strip an optional 'file:' scheme
    # from *path* and open (or create) the local repository there.
    return localrepository(ui, util.drop_scheme('file', path), create)
1812
1815
def islocal(path):
    """Report whether *path* names a local repository.

    This module only ever handles local repositories, so the answer
    is unconditionally True; *path* is accepted for interface parity
    with other repository modules.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now