##// END OF EJS Templates
Minor tags optimization
Matt Mackall -
r3456:3464f5e7 default
parent child Browse files
Show More
@@ -1,1817 +1,1819 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
40 os.mkdir(self.join("data"))
41 else:
41 else:
42 raise repo.RepoError(_("repository %s not found") % path)
42 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
43 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
44 raise repo.RepoError(_("repository %s already exists") % path)
45
45
46 self.root = os.path.abspath(path)
46 self.root = os.path.abspath(path)
47 self.origroot = path
47 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
48 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
49 self.opener = util.opener(self.path)
50 self.wopener = util.opener(self.root)
50 self.wopener = util.opener(self.root)
51
51
52 try:
52 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
53 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
54 except IOError:
55 pass
55 pass
56
56
57 v = self.ui.configrevlog()
57 v = self.ui.configrevlog()
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
60 fl = v.get('flags', None)
61 flags = 0
61 flags = 0
62 if fl != None:
62 if fl != None:
63 for x in fl.split():
63 for x in fl.split():
64 flags |= revlog.flagstr(x)
64 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
65 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
66 flags = revlog.REVLOG_DEFAULT_FLAGS
67
67
68 v = self.revlogversion | flags
68 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.opener, v)
69 self.manifest = manifest.manifest(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
71
71
72 # the changelog might not have the inline index flag
72 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
73 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
74 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just version from the changelog
75 # Otherwise, just version from the changelog
76 v = self.changelog.version
76 v = self.changelog.version
77 if v == self.revlogversion:
77 if v == self.revlogversion:
78 v |= flags
78 v |= flags
79 self.revlogversion = v
79 self.revlogversion = v
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.branchcache = None
82 self.branchcache = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.encodepats = None
84 self.encodepats = None
85 self.decodepats = None
85 self.decodepats = None
86 self.transhandle = None
86 self.transhandle = None
87
87
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
90 def url(self):
90 def url(self):
91 return 'file:' + self.root
91 return 'file:' + self.root
92
92
93 def hook(self, name, throw=False, **args):
93 def hook(self, name, throw=False, **args):
94 def callhook(hname, funcname):
94 def callhook(hname, funcname):
95 '''call python hook. hook is callable object, looked up as
95 '''call python hook. hook is callable object, looked up as
96 name in python module. if callable returns "true", hook
96 name in python module. if callable returns "true", hook
97 fails, else passes. if hook raises exception, treated as
97 fails, else passes. if hook raises exception, treated as
98 hook failure. exception propagates if throw is "true".
98 hook failure. exception propagates if throw is "true".
99
99
100 reason for "true" meaning "hook failed" is so that
100 reason for "true" meaning "hook failed" is so that
101 unmodified commands (e.g. mercurial.commands.update) can
101 unmodified commands (e.g. mercurial.commands.update) can
102 be run as hooks without wrappers to convert return values.'''
102 be run as hooks without wrappers to convert return values.'''
103
103
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 d = funcname.rfind('.')
105 d = funcname.rfind('.')
106 if d == -1:
106 if d == -1:
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 % (hname, funcname))
108 % (hname, funcname))
109 modname = funcname[:d]
109 modname = funcname[:d]
110 try:
110 try:
111 obj = __import__(modname)
111 obj = __import__(modname)
112 except ImportError:
112 except ImportError:
113 try:
113 try:
114 # extensions are loaded with hgext_ prefix
114 # extensions are loaded with hgext_ prefix
115 obj = __import__("hgext_%s" % modname)
115 obj = __import__("hgext_%s" % modname)
116 except ImportError:
116 except ImportError:
117 raise util.Abort(_('%s hook is invalid '
117 raise util.Abort(_('%s hook is invalid '
118 '(import of "%s" failed)') %
118 '(import of "%s" failed)') %
119 (hname, modname))
119 (hname, modname))
120 try:
120 try:
121 for p in funcname.split('.')[1:]:
121 for p in funcname.split('.')[1:]:
122 obj = getattr(obj, p)
122 obj = getattr(obj, p)
123 except AttributeError, err:
123 except AttributeError, err:
124 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not defined)') %
125 '("%s" is not defined)') %
126 (hname, funcname))
126 (hname, funcname))
127 if not callable(obj):
127 if not callable(obj):
128 raise util.Abort(_('%s hook is invalid '
128 raise util.Abort(_('%s hook is invalid '
129 '("%s" is not callable)') %
129 '("%s" is not callable)') %
130 (hname, funcname))
130 (hname, funcname))
131 try:
131 try:
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 except (KeyboardInterrupt, util.SignalInterrupt):
133 except (KeyboardInterrupt, util.SignalInterrupt):
134 raise
134 raise
135 except Exception, exc:
135 except Exception, exc:
136 if isinstance(exc, util.Abort):
136 if isinstance(exc, util.Abort):
137 self.ui.warn(_('error: %s hook failed: %s\n') %
137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 (hname, exc.args[0]))
138 (hname, exc.args[0]))
139 else:
139 else:
140 self.ui.warn(_('error: %s hook raised an exception: '
140 self.ui.warn(_('error: %s hook raised an exception: '
141 '%s\n') % (hname, exc))
141 '%s\n') % (hname, exc))
142 if throw:
142 if throw:
143 raise
143 raise
144 self.ui.print_exc()
144 self.ui.print_exc()
145 return True
145 return True
146 if r:
146 if r:
147 if throw:
147 if throw:
148 raise util.Abort(_('%s hook failed') % hname)
148 raise util.Abort(_('%s hook failed') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 return r
150 return r
151
151
152 def runhook(name, cmd):
152 def runhook(name, cmd):
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 r = util.system(cmd, environ=env, cwd=self.root)
155 r = util.system(cmd, environ=env, cwd=self.root)
156 if r:
156 if r:
157 desc, r = util.explain_exit(r)
157 desc, r = util.explain_exit(r)
158 if throw:
158 if throw:
159 raise util.Abort(_('%s hook %s') % (name, desc))
159 raise util.Abort(_('%s hook %s') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 return r
161 return r
162
162
163 r = False
163 r = False
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 if hname.split(".", 1)[0] == name and cmd]
165 if hname.split(".", 1)[0] == name and cmd]
166 hooks.sort()
166 hooks.sort()
167 for hname, cmd in hooks:
167 for hname, cmd in hooks:
168 if cmd.startswith('python:'):
168 if cmd.startswith('python:'):
169 r = callhook(hname, cmd[7:].strip()) or r
169 r = callhook(hname, cmd[7:].strip()) or r
170 else:
170 else:
171 r = runhook(hname, cmd) or r
171 r = runhook(hname, cmd) or r
172 return r
172 return r
173
173
174 tag_disallowed = ':\r\n'
174 tag_disallowed = ':\r\n'
175
175
176 def tag(self, name, node, message, local, user, date):
176 def tag(self, name, node, message, local, user, date):
177 '''tag a revision with a symbolic name.
177 '''tag a revision with a symbolic name.
178
178
179 if local is True, the tag is stored in a per-repository file.
179 if local is True, the tag is stored in a per-repository file.
180 otherwise, it is stored in the .hgtags file, and a new
180 otherwise, it is stored in the .hgtags file, and a new
181 changeset is committed with the change.
181 changeset is committed with the change.
182
182
183 keyword arguments:
183 keyword arguments:
184
184
185 local: whether to store tag in non-version-controlled file
185 local: whether to store tag in non-version-controlled file
186 (default False)
186 (default False)
187
187
188 message: commit message to use if committing
188 message: commit message to use if committing
189
189
190 user: name of user to use if committing
190 user: name of user to use if committing
191
191
192 date: date tuple to use if committing'''
192 date: date tuple to use if committing'''
193
193
194 for c in self.tag_disallowed:
194 for c in self.tag_disallowed:
195 if c in name:
195 if c in name:
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197
197
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199
199
200 if local:
200 if local:
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.hook('tag', node=hex(node), tag=name, local=local)
202 self.hook('tag', node=hex(node), tag=name, local=local)
203 return
203 return
204
204
205 for x in self.status()[:5]:
205 for x in self.status()[:5]:
206 if '.hgtags' in x:
206 if '.hgtags' in x:
207 raise util.Abort(_('working copy of .hgtags is changed '
207 raise util.Abort(_('working copy of .hgtags is changed '
208 '(please commit .hgtags manually)'))
208 '(please commit .hgtags manually)'))
209
209
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 if self.dirstate.state('.hgtags') == '?':
211 if self.dirstate.state('.hgtags') == '?':
212 self.add(['.hgtags'])
212 self.add(['.hgtags'])
213
213
214 self.commit(['.hgtags'], message, user, date)
214 self.commit(['.hgtags'], message, user, date)
215 self.hook('tag', node=hex(node), tag=name, local=local)
215 self.hook('tag', node=hex(node), tag=name, local=local)
216
216
217 def tags(self):
217 def tags(self):
218 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
219 if not self.tagscache:
219 if not self.tagscache:
220 self.tagscache = {}
220 self.tagscache = {}
221
221
222 def parsetag(line, context):
222 def parsetag(line, context):
223 if not line:
223 if not line:
224 return
224 return
225 s = l.split(" ", 1)
225 s = l.split(" ", 1)
226 if len(s) != 2:
226 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
228 return
229 node, key = s
229 node, key = s
230 key = key.strip()
230 key = key.strip()
231 try:
231 try:
232 bin_n = bin(node)
232 bin_n = bin(node)
233 except TypeError:
233 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
235 (context, node))
236 return
236 return
237 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
239 (context, key))
240 return
240 return
241 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
242
242
243 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
245 # taking precedence
245 # taking precedence
246 heads = self.heads()
246 heads = self.heads()
247 heads.reverse()
247 heads.reverse()
248 fl = self.file(".hgtags")
248 seen = {}
249 for node in heads:
249 for node in heads:
250 f = self.filectx('.hgtags', node)
250 f = self.filectx('.hgtags', node)
251 if not f: continue
251 if not f or f.filerev() in seen: continue
252 seen[f.filerev()] = 1
252 count = 0
253 count = 0
253 for l in f.data().splitlines():
254 for l in f.data().splitlines():
254 count += 1
255 count += 1
255 parsetag(l, _("%s, line %d") % (str(f), count))
256 parsetag(l, _("%s, line %d") % (str(f), count))
257
256 try:
258 try:
257 f = self.opener("localtags")
259 f = self.opener("localtags")
258 count = 0
260 count = 0
259 for l in f:
261 for l in f:
260 count += 1
262 count += 1
261 parsetag(l, _("localtags, line %d") % count)
263 parsetag(l, _("localtags, line %d") % count)
262 except IOError:
264 except IOError:
263 pass
265 pass
264
266
265 self.tagscache['tip'] = self.changelog.tip()
267 self.tagscache['tip'] = self.changelog.tip()
266
268
267 return self.tagscache
269 return self.tagscache
268
270
269 def tagslist(self):
271 def tagslist(self):
270 '''return a list of tags ordered by revision'''
272 '''return a list of tags ordered by revision'''
271 l = []
273 l = []
272 for t, n in self.tags().items():
274 for t, n in self.tags().items():
273 try:
275 try:
274 r = self.changelog.rev(n)
276 r = self.changelog.rev(n)
275 except:
277 except:
276 r = -2 # sort to the beginning of the list if unknown
278 r = -2 # sort to the beginning of the list if unknown
277 l.append((r, t, n))
279 l.append((r, t, n))
278 l.sort()
280 l.sort()
279 return [(t, n) for r, t, n in l]
281 return [(t, n) for r, t, n in l]
280
282
281 def nodetags(self, node):
283 def nodetags(self, node):
282 '''return the tags associated with a node'''
284 '''return the tags associated with a node'''
283 if not self.nodetagscache:
285 if not self.nodetagscache:
284 self.nodetagscache = {}
286 self.nodetagscache = {}
285 for t, n in self.tags().items():
287 for t, n in self.tags().items():
286 self.nodetagscache.setdefault(n, []).append(t)
288 self.nodetagscache.setdefault(n, []).append(t)
287 return self.nodetagscache.get(node, [])
289 return self.nodetagscache.get(node, [])
288
290
289 def branchtags(self):
291 def branchtags(self):
290 if self.branchcache != None:
292 if self.branchcache != None:
291 return self.branchcache
293 return self.branchcache
292
294
293 self.branchcache = {} # avoid recursion in changectx
295 self.branchcache = {} # avoid recursion in changectx
294
296
295 try:
297 try:
296 f = self.opener("branches.cache")
298 f = self.opener("branches.cache")
297 last, lrev = f.readline().rstrip().split(" ", 1)
299 last, lrev = f.readline().rstrip().split(" ", 1)
298 last, lrev = bin(last), int(lrev)
300 last, lrev = bin(last), int(lrev)
299 if (lrev < self.changelog.count() and
301 if (lrev < self.changelog.count() and
300 self.changelog.node(lrev) == last): # sanity check
302 self.changelog.node(lrev) == last): # sanity check
301 for l in f:
303 for l in f:
302 node, label = l.rstrip().split(" ", 1)
304 node, label = l.rstrip().split(" ", 1)
303 self.branchcache[label] = bin(node)
305 self.branchcache[label] = bin(node)
304 else: # invalidate the cache
306 else: # invalidate the cache
305 last, lrev = nullid, -1
307 last, lrev = nullid, -1
306 f.close()
308 f.close()
307 except IOError:
309 except IOError:
308 last, lrev = nullid, -1
310 last, lrev = nullid, -1
309
311
310 tip = self.changelog.count() - 1
312 tip = self.changelog.count() - 1
311 if lrev != tip:
313 if lrev != tip:
312 for r in xrange(lrev + 1, tip + 1):
314 for r in xrange(lrev + 1, tip + 1):
313 c = self.changectx(r)
315 c = self.changectx(r)
314 b = c.branch()
316 b = c.branch()
315 if b:
317 if b:
316 self.branchcache[b] = c.node()
318 self.branchcache[b] = c.node()
317 self._writebranchcache()
319 self._writebranchcache()
318
320
319 return self.branchcache
321 return self.branchcache
320
322
321 def _writebranchcache(self):
323 def _writebranchcache(self):
322 try:
324 try:
323 f = self.opener("branches.cache", "w")
325 f = self.opener("branches.cache", "w")
324 t = self.changelog.tip()
326 t = self.changelog.tip()
325 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
327 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
326 for label, node in self.branchcache.iteritems():
328 for label, node in self.branchcache.iteritems():
327 f.write("%s %s\n" % (hex(node), label))
329 f.write("%s %s\n" % (hex(node), label))
328 except IOError:
330 except IOError:
329 pass
331 pass
330
332
331 def lookup(self, key):
333 def lookup(self, key):
332 if key == '.':
334 if key == '.':
333 key = self.dirstate.parents()[0]
335 key = self.dirstate.parents()[0]
334 if key == nullid:
336 if key == nullid:
335 raise repo.RepoError(_("no revision checked out"))
337 raise repo.RepoError(_("no revision checked out"))
336 n = self.changelog._match(key)
338 n = self.changelog._match(key)
337 if n:
339 if n:
338 return n
340 return n
339 if key in self.tags():
341 if key in self.tags():
340 return self.tags()[key]
342 return self.tags()[key]
341 if key in self.branchtags():
343 if key in self.branchtags():
342 return self.branchtags()[key]
344 return self.branchtags()[key]
343 n = self.changelog._partialmatch(key)
345 n = self.changelog._partialmatch(key)
344 if n:
346 if n:
345 return n
347 return n
346 raise repo.RepoError(_("unknown revision '%s'") % key)
348 raise repo.RepoError(_("unknown revision '%s'") % key)
347
349
348 def dev(self):
350 def dev(self):
349 return os.lstat(self.path).st_dev
351 return os.lstat(self.path).st_dev
350
352
351 def local(self):
353 def local(self):
352 return True
354 return True
353
355
354 def join(self, f):
356 def join(self, f):
355 return os.path.join(self.path, f)
357 return os.path.join(self.path, f)
356
358
357 def wjoin(self, f):
359 def wjoin(self, f):
358 return os.path.join(self.root, f)
360 return os.path.join(self.root, f)
359
361
360 def file(self, f):
362 def file(self, f):
361 if f[0] == '/':
363 if f[0] == '/':
362 f = f[1:]
364 f = f[1:]
363 return filelog.filelog(self.opener, f, self.revlogversion)
365 return filelog.filelog(self.opener, f, self.revlogversion)
364
366
365 def changectx(self, changeid=None):
367 def changectx(self, changeid=None):
366 return context.changectx(self, changeid)
368 return context.changectx(self, changeid)
367
369
368 def workingctx(self):
370 def workingctx(self):
369 return context.workingctx(self)
371 return context.workingctx(self)
370
372
371 def parents(self, changeid=None):
373 def parents(self, changeid=None):
372 '''
374 '''
373 get list of changectxs for parents of changeid or working directory
375 get list of changectxs for parents of changeid or working directory
374 '''
376 '''
375 if changeid is None:
377 if changeid is None:
376 pl = self.dirstate.parents()
378 pl = self.dirstate.parents()
377 else:
379 else:
378 n = self.changelog.lookup(changeid)
380 n = self.changelog.lookup(changeid)
379 pl = self.changelog.parents(n)
381 pl = self.changelog.parents(n)
380 if pl[1] == nullid:
382 if pl[1] == nullid:
381 return [self.changectx(pl[0])]
383 return [self.changectx(pl[0])]
382 return [self.changectx(pl[0]), self.changectx(pl[1])]
384 return [self.changectx(pl[0]), self.changectx(pl[1])]
383
385
384 def filectx(self, path, changeid=None, fileid=None):
386 def filectx(self, path, changeid=None, fileid=None):
385 """changeid can be a changeset revision, node, or tag.
387 """changeid can be a changeset revision, node, or tag.
386 fileid can be a file revision or node."""
388 fileid can be a file revision or node."""
387 return context.filectx(self, path, changeid, fileid)
389 return context.filectx(self, path, changeid, fileid)
388
390
389 def getcwd(self):
391 def getcwd(self):
390 return self.dirstate.getcwd()
392 return self.dirstate.getcwd()
391
393
392 def wfile(self, f, mode='r'):
394 def wfile(self, f, mode='r'):
393 return self.wopener(f, mode)
395 return self.wopener(f, mode)
394
396
395 def wread(self, filename):
397 def wread(self, filename):
396 if self.encodepats == None:
398 if self.encodepats == None:
397 l = []
399 l = []
398 for pat, cmd in self.ui.configitems("encode"):
400 for pat, cmd in self.ui.configitems("encode"):
399 mf = util.matcher(self.root, "", [pat], [], [])[1]
401 mf = util.matcher(self.root, "", [pat], [], [])[1]
400 l.append((mf, cmd))
402 l.append((mf, cmd))
401 self.encodepats = l
403 self.encodepats = l
402
404
403 data = self.wopener(filename, 'r').read()
405 data = self.wopener(filename, 'r').read()
404
406
405 for mf, cmd in self.encodepats:
407 for mf, cmd in self.encodepats:
406 if mf(filename):
408 if mf(filename):
407 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
409 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
408 data = util.filter(data, cmd)
410 data = util.filter(data, cmd)
409 break
411 break
410
412
411 return data
413 return data
412
414
413 def wwrite(self, filename, data, fd=None):
415 def wwrite(self, filename, data, fd=None):
414 if self.decodepats == None:
416 if self.decodepats == None:
415 l = []
417 l = []
416 for pat, cmd in self.ui.configitems("decode"):
418 for pat, cmd in self.ui.configitems("decode"):
417 mf = util.matcher(self.root, "", [pat], [], [])[1]
419 mf = util.matcher(self.root, "", [pat], [], [])[1]
418 l.append((mf, cmd))
420 l.append((mf, cmd))
419 self.decodepats = l
421 self.decodepats = l
420
422
421 for mf, cmd in self.decodepats:
423 for mf, cmd in self.decodepats:
422 if mf(filename):
424 if mf(filename):
423 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
425 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
424 data = util.filter(data, cmd)
426 data = util.filter(data, cmd)
425 break
427 break
426
428
427 if fd:
429 if fd:
428 return fd.write(data)
430 return fd.write(data)
429 return self.wopener(filename, 'w').write(data)
431 return self.wopener(filename, 'w').write(data)
430
432
431 def transaction(self):
433 def transaction(self):
432 tr = self.transhandle
434 tr = self.transhandle
433 if tr != None and tr.running():
435 if tr != None and tr.running():
434 return tr.nest()
436 return tr.nest()
435
437
436 # save dirstate for rollback
438 # save dirstate for rollback
437 try:
439 try:
438 ds = self.opener("dirstate").read()
440 ds = self.opener("dirstate").read()
439 except IOError:
441 except IOError:
440 ds = ""
442 ds = ""
441 self.opener("journal.dirstate", "w").write(ds)
443 self.opener("journal.dirstate", "w").write(ds)
442
444
443 tr = transaction.transaction(self.ui.warn, self.opener,
445 tr = transaction.transaction(self.ui.warn, self.opener,
444 self.join("journal"),
446 self.join("journal"),
445 aftertrans(self.path))
447 aftertrans(self.path))
446 self.transhandle = tr
448 self.transhandle = tr
447 return tr
449 return tr
448
450
449 def recover(self):
451 def recover(self):
450 l = self.lock()
452 l = self.lock()
451 if os.path.exists(self.join("journal")):
453 if os.path.exists(self.join("journal")):
452 self.ui.status(_("rolling back interrupted transaction\n"))
454 self.ui.status(_("rolling back interrupted transaction\n"))
453 transaction.rollback(self.opener, self.join("journal"))
455 transaction.rollback(self.opener, self.join("journal"))
454 self.reload()
456 self.reload()
455 return True
457 return True
456 else:
458 else:
457 self.ui.warn(_("no interrupted transaction available\n"))
459 self.ui.warn(_("no interrupted transaction available\n"))
458 return False
460 return False
459
461
460 def rollback(self, wlock=None):
462 def rollback(self, wlock=None):
461 if not wlock:
463 if not wlock:
462 wlock = self.wlock()
464 wlock = self.wlock()
463 l = self.lock()
465 l = self.lock()
464 if os.path.exists(self.join("undo")):
466 if os.path.exists(self.join("undo")):
465 self.ui.status(_("rolling back last transaction\n"))
467 self.ui.status(_("rolling back last transaction\n"))
466 transaction.rollback(self.opener, self.join("undo"))
468 transaction.rollback(self.opener, self.join("undo"))
467 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
469 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
468 self.reload()
470 self.reload()
469 self.wreload()
471 self.wreload()
470 else:
472 else:
471 self.ui.warn(_("no rollback information available\n"))
473 self.ui.warn(_("no rollback information available\n"))
472
474
473 def wreload(self):
475 def wreload(self):
474 self.dirstate.read()
476 self.dirstate.read()
475
477
476 def reload(self):
478 def reload(self):
477 self.changelog.load()
479 self.changelog.load()
478 self.manifest.load()
480 self.manifest.load()
479 self.tagscache = None
481 self.tagscache = None
480 self.nodetagscache = None
482 self.nodetagscache = None
481
483
482 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
484 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
483 desc=None):
485 desc=None):
484 try:
486 try:
485 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
487 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
486 except lock.LockHeld, inst:
488 except lock.LockHeld, inst:
487 if not wait:
489 if not wait:
488 raise
490 raise
489 self.ui.warn(_("waiting for lock on %s held by %s\n") %
491 self.ui.warn(_("waiting for lock on %s held by %s\n") %
490 (desc, inst.args[0]))
492 (desc, inst.args[0]))
491 # default to 600 seconds timeout
493 # default to 600 seconds timeout
492 l = lock.lock(self.join(lockname),
494 l = lock.lock(self.join(lockname),
493 int(self.ui.config("ui", "timeout") or 600),
495 int(self.ui.config("ui", "timeout") or 600),
494 releasefn, desc=desc)
496 releasefn, desc=desc)
495 if acquirefn:
497 if acquirefn:
496 acquirefn()
498 acquirefn()
497 return l
499 return l
498
500
499 def lock(self, wait=1):
501 def lock(self, wait=1):
500 return self.do_lock("lock", wait, acquirefn=self.reload,
502 return self.do_lock("lock", wait, acquirefn=self.reload,
501 desc=_('repository %s') % self.origroot)
503 desc=_('repository %s') % self.origroot)
502
504
503 def wlock(self, wait=1):
505 def wlock(self, wait=1):
504 return self.do_lock("wlock", wait, self.dirstate.write,
506 return self.do_lock("wlock", wait, self.dirstate.write,
505 self.wreload,
507 self.wreload,
506 desc=_('working directory of %s') % self.origroot)
508 desc=_('working directory of %s') % self.origroot)
507
509
508 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
510 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
509 """
511 """
510 commit an individual file as part of a larger transaction
512 commit an individual file as part of a larger transaction
511 """
513 """
512
514
513 t = self.wread(fn)
515 t = self.wread(fn)
514 fl = self.file(fn)
516 fl = self.file(fn)
515 fp1 = manifest1.get(fn, nullid)
517 fp1 = manifest1.get(fn, nullid)
516 fp2 = manifest2.get(fn, nullid)
518 fp2 = manifest2.get(fn, nullid)
517
519
518 meta = {}
520 meta = {}
519 cp = self.dirstate.copied(fn)
521 cp = self.dirstate.copied(fn)
520 if cp:
522 if cp:
521 meta["copy"] = cp
523 meta["copy"] = cp
522 if not manifest2: # not a branch merge
524 if not manifest2: # not a branch merge
523 meta["copyrev"] = hex(manifest1.get(cp, nullid))
525 meta["copyrev"] = hex(manifest1.get(cp, nullid))
524 fp2 = nullid
526 fp2 = nullid
525 elif fp2 != nullid: # copied on remote side
527 elif fp2 != nullid: # copied on remote side
526 meta["copyrev"] = hex(manifest1.get(cp, nullid))
528 meta["copyrev"] = hex(manifest1.get(cp, nullid))
527 else: # copied on local side, reversed
529 else: # copied on local side, reversed
528 meta["copyrev"] = hex(manifest2.get(cp))
530 meta["copyrev"] = hex(manifest2.get(cp))
529 fp2 = nullid
531 fp2 = nullid
530 self.ui.debug(_(" %s: copy %s:%s\n") %
532 self.ui.debug(_(" %s: copy %s:%s\n") %
531 (fn, cp, meta["copyrev"]))
533 (fn, cp, meta["copyrev"]))
532 fp1 = nullid
534 fp1 = nullid
533 elif fp2 != nullid:
535 elif fp2 != nullid:
534 # is one parent an ancestor of the other?
536 # is one parent an ancestor of the other?
535 fpa = fl.ancestor(fp1, fp2)
537 fpa = fl.ancestor(fp1, fp2)
536 if fpa == fp1:
538 if fpa == fp1:
537 fp1, fp2 = fp2, nullid
539 fp1, fp2 = fp2, nullid
538 elif fpa == fp2:
540 elif fpa == fp2:
539 fp2 = nullid
541 fp2 = nullid
540
542
541 # is the file unmodified from the parent? report existing entry
543 # is the file unmodified from the parent? report existing entry
542 if fp2 == nullid and not fl.cmp(fp1, t):
544 if fp2 == nullid and not fl.cmp(fp1, t):
543 return fp1
545 return fp1
544
546
545 changelist.append(fn)
547 changelist.append(fn)
546 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
548 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
547
549
548 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
550 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
549 orig_parent = self.dirstate.parents()[0] or nullid
551 orig_parent = self.dirstate.parents()[0] or nullid
550 p1 = p1 or self.dirstate.parents()[0] or nullid
552 p1 = p1 or self.dirstate.parents()[0] or nullid
551 p2 = p2 or self.dirstate.parents()[1] or nullid
553 p2 = p2 or self.dirstate.parents()[1] or nullid
552 c1 = self.changelog.read(p1)
554 c1 = self.changelog.read(p1)
553 c2 = self.changelog.read(p2)
555 c2 = self.changelog.read(p2)
554 m1 = self.manifest.read(c1[0]).copy()
556 m1 = self.manifest.read(c1[0]).copy()
555 m2 = self.manifest.read(c2[0])
557 m2 = self.manifest.read(c2[0])
556 changed = []
558 changed = []
557 removed = []
559 removed = []
558
560
559 if orig_parent == p1:
561 if orig_parent == p1:
560 update_dirstate = 1
562 update_dirstate = 1
561 else:
563 else:
562 update_dirstate = 0
564 update_dirstate = 0
563
565
564 if not wlock:
566 if not wlock:
565 wlock = self.wlock()
567 wlock = self.wlock()
566 l = self.lock()
568 l = self.lock()
567 tr = self.transaction()
569 tr = self.transaction()
568 linkrev = self.changelog.count()
570 linkrev = self.changelog.count()
569 for f in files:
571 for f in files:
570 try:
572 try:
571 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
573 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
572 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
574 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
573 except IOError:
575 except IOError:
574 try:
576 try:
575 del m1[f]
577 del m1[f]
576 if update_dirstate:
578 if update_dirstate:
577 self.dirstate.forget([f])
579 self.dirstate.forget([f])
578 removed.append(f)
580 removed.append(f)
579 except:
581 except:
580 # deleted from p2?
582 # deleted from p2?
581 pass
583 pass
582
584
583 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
585 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
584 user = user or self.ui.username()
586 user = user or self.ui.username()
585 n = self.changelog.add(mnode, changed + removed, text,
587 n = self.changelog.add(mnode, changed + removed, text,
586 tr, p1, p2, user, date)
588 tr, p1, p2, user, date)
587 tr.close()
589 tr.close()
588 if update_dirstate:
590 if update_dirstate:
589 self.dirstate.setparents(n, nullid)
591 self.dirstate.setparents(n, nullid)
590
592
591 def commit(self, files=None, text="", user=None, date=None,
593 def commit(self, files=None, text="", user=None, date=None,
592 match=util.always, force=False, lock=None, wlock=None,
594 match=util.always, force=False, lock=None, wlock=None,
593 force_editor=False):
595 force_editor=False):
594 commit = []
596 commit = []
595 remove = []
597 remove = []
596 changed = []
598 changed = []
597
599
598 if files:
600 if files:
599 for f in files:
601 for f in files:
600 s = self.dirstate.state(f)
602 s = self.dirstate.state(f)
601 if s in 'nmai':
603 if s in 'nmai':
602 commit.append(f)
604 commit.append(f)
603 elif s == 'r':
605 elif s == 'r':
604 remove.append(f)
606 remove.append(f)
605 else:
607 else:
606 self.ui.warn(_("%s not tracked!\n") % f)
608 self.ui.warn(_("%s not tracked!\n") % f)
607 else:
609 else:
608 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
610 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
609 commit = modified + added
611 commit = modified + added
610 remove = removed
612 remove = removed
611
613
612 p1, p2 = self.dirstate.parents()
614 p1, p2 = self.dirstate.parents()
613 c1 = self.changelog.read(p1)
615 c1 = self.changelog.read(p1)
614 c2 = self.changelog.read(p2)
616 c2 = self.changelog.read(p2)
615 m1 = self.manifest.read(c1[0]).copy()
617 m1 = self.manifest.read(c1[0]).copy()
616 m2 = self.manifest.read(c2[0])
618 m2 = self.manifest.read(c2[0])
617
619
618 branchname = self.workingctx().branch()
620 branchname = self.workingctx().branch()
619 oldname = c1[5].get("branch", "")
621 oldname = c1[5].get("branch", "")
620
622
621 if not commit and not remove and not force and p2 == nullid and \
623 if not commit and not remove and not force and p2 == nullid and \
622 branchname == oldname:
624 branchname == oldname:
623 self.ui.status(_("nothing changed\n"))
625 self.ui.status(_("nothing changed\n"))
624 return None
626 return None
625
627
626 xp1 = hex(p1)
628 xp1 = hex(p1)
627 if p2 == nullid: xp2 = ''
629 if p2 == nullid: xp2 = ''
628 else: xp2 = hex(p2)
630 else: xp2 = hex(p2)
629
631
630 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
632 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
631
633
632 if not wlock:
634 if not wlock:
633 wlock = self.wlock()
635 wlock = self.wlock()
634 if not lock:
636 if not lock:
635 lock = self.lock()
637 lock = self.lock()
636 tr = self.transaction()
638 tr = self.transaction()
637
639
638 # check in files
640 # check in files
639 new = {}
641 new = {}
640 linkrev = self.changelog.count()
642 linkrev = self.changelog.count()
641 commit.sort()
643 commit.sort()
642 for f in commit:
644 for f in commit:
643 self.ui.note(f + "\n")
645 self.ui.note(f + "\n")
644 try:
646 try:
645 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
647 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
646 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
648 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
647 except IOError:
649 except IOError:
648 self.ui.warn(_("trouble committing %s!\n") % f)
650 self.ui.warn(_("trouble committing %s!\n") % f)
649 raise
651 raise
650
652
651 # update manifest
653 # update manifest
652 m1.update(new)
654 m1.update(new)
653 for f in remove:
655 for f in remove:
654 if f in m1:
656 if f in m1:
655 del m1[f]
657 del m1[f]
656 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
658 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
657
659
658 # add changeset
660 # add changeset
659 new = new.keys()
661 new = new.keys()
660 new.sort()
662 new.sort()
661
663
662 user = user or self.ui.username()
664 user = user or self.ui.username()
663 if not text or force_editor:
665 if not text or force_editor:
664 edittext = []
666 edittext = []
665 if text:
667 if text:
666 edittext.append(text)
668 edittext.append(text)
667 edittext.append("")
669 edittext.append("")
668 if p2 != nullid:
670 if p2 != nullid:
669 edittext.append("HG: branch merge")
671 edittext.append("HG: branch merge")
670 edittext.extend(["HG: changed %s" % f for f in changed])
672 edittext.extend(["HG: changed %s" % f for f in changed])
671 edittext.extend(["HG: removed %s" % f for f in remove])
673 edittext.extend(["HG: removed %s" % f for f in remove])
672 if not changed and not remove:
674 if not changed and not remove:
673 edittext.append("HG: no files changed")
675 edittext.append("HG: no files changed")
674 edittext.append("")
676 edittext.append("")
675 # run editor in the repository root
677 # run editor in the repository root
676 olddir = os.getcwd()
678 olddir = os.getcwd()
677 os.chdir(self.root)
679 os.chdir(self.root)
678 text = self.ui.edit("\n".join(edittext), user)
680 text = self.ui.edit("\n".join(edittext), user)
679 os.chdir(olddir)
681 os.chdir(olddir)
680
682
681 lines = [line.rstrip() for line in text.rstrip().splitlines()]
683 lines = [line.rstrip() for line in text.rstrip().splitlines()]
682 while lines and not lines[0]:
684 while lines and not lines[0]:
683 del lines[0]
685 del lines[0]
684 if not lines:
686 if not lines:
685 return None
687 return None
686 text = '\n'.join(lines)
688 text = '\n'.join(lines)
687 extra = {}
689 extra = {}
688 if branchname:
690 if branchname:
689 extra["branch"] = branchname
691 extra["branch"] = branchname
690 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
692 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
691 user, date, extra)
693 user, date, extra)
692 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
694 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
693 parent2=xp2)
695 parent2=xp2)
694 tr.close()
696 tr.close()
695
697
696 self.dirstate.setparents(n)
698 self.dirstate.setparents(n)
697 self.dirstate.update(new, "n")
699 self.dirstate.update(new, "n")
698 self.dirstate.forget(remove)
700 self.dirstate.forget(remove)
699
701
700 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
702 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
701 return n
703 return n
702
704
703 def walk(self, node=None, files=[], match=util.always, badmatch=None):
705 def walk(self, node=None, files=[], match=util.always, badmatch=None):
704 if node:
706 if node:
705 fdict = dict.fromkeys(files)
707 fdict = dict.fromkeys(files)
706 for fn in self.manifest.read(self.changelog.read(node)[0]):
708 for fn in self.manifest.read(self.changelog.read(node)[0]):
707 for ffn in fdict:
709 for ffn in fdict:
708 # match if the file is the exact name or a directory
710 # match if the file is the exact name or a directory
709 if ffn == fn or fn.startswith("%s/" % ffn):
711 if ffn == fn or fn.startswith("%s/" % ffn):
710 del fdict[ffn]
712 del fdict[ffn]
711 break
713 break
712 if match(fn):
714 if match(fn):
713 yield 'm', fn
715 yield 'm', fn
714 for fn in fdict:
716 for fn in fdict:
715 if badmatch and badmatch(fn):
717 if badmatch and badmatch(fn):
716 if match(fn):
718 if match(fn):
717 yield 'b', fn
719 yield 'b', fn
718 else:
720 else:
719 self.ui.warn(_('%s: No such file in rev %s\n') % (
721 self.ui.warn(_('%s: No such file in rev %s\n') % (
720 util.pathto(self.getcwd(), fn), short(node)))
722 util.pathto(self.getcwd(), fn), short(node)))
721 else:
723 else:
722 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
724 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
723 yield src, fn
725 yield src, fn
724
726
725 def status(self, node1=None, node2=None, files=[], match=util.always,
727 def status(self, node1=None, node2=None, files=[], match=util.always,
726 wlock=None, list_ignored=False, list_clean=False):
728 wlock=None, list_ignored=False, list_clean=False):
727 """return status of files between two nodes or node and working directory
729 """return status of files between two nodes or node and working directory
728
730
729 If node1 is None, use the first dirstate parent instead.
731 If node1 is None, use the first dirstate parent instead.
730 If node2 is None, compare node1 with working directory.
732 If node2 is None, compare node1 with working directory.
731 """
733 """
732
734
733 def fcmp(fn, mf):
735 def fcmp(fn, mf):
734 t1 = self.wread(fn)
736 t1 = self.wread(fn)
735 return self.file(fn).cmp(mf.get(fn, nullid), t1)
737 return self.file(fn).cmp(mf.get(fn, nullid), t1)
736
738
737 def mfmatches(node):
739 def mfmatches(node):
738 change = self.changelog.read(node)
740 change = self.changelog.read(node)
739 mf = self.manifest.read(change[0]).copy()
741 mf = self.manifest.read(change[0]).copy()
740 for fn in mf.keys():
742 for fn in mf.keys():
741 if not match(fn):
743 if not match(fn):
742 del mf[fn]
744 del mf[fn]
743 return mf
745 return mf
744
746
745 modified, added, removed, deleted, unknown = [], [], [], [], []
747 modified, added, removed, deleted, unknown = [], [], [], [], []
746 ignored, clean = [], []
748 ignored, clean = [], []
747
749
748 compareworking = False
750 compareworking = False
749 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
751 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
750 compareworking = True
752 compareworking = True
751
753
752 if not compareworking:
754 if not compareworking:
753 # read the manifest from node1 before the manifest from node2,
755 # read the manifest from node1 before the manifest from node2,
754 # so that we'll hit the manifest cache if we're going through
756 # so that we'll hit the manifest cache if we're going through
755 # all the revisions in parent->child order.
757 # all the revisions in parent->child order.
756 mf1 = mfmatches(node1)
758 mf1 = mfmatches(node1)
757
759
758 # are we comparing the working directory?
760 # are we comparing the working directory?
759 if not node2:
761 if not node2:
760 if not wlock:
762 if not wlock:
761 try:
763 try:
762 wlock = self.wlock(wait=0)
764 wlock = self.wlock(wait=0)
763 except lock.LockException:
765 except lock.LockException:
764 wlock = None
766 wlock = None
765 (lookup, modified, added, removed, deleted, unknown,
767 (lookup, modified, added, removed, deleted, unknown,
766 ignored, clean) = self.dirstate.status(files, match,
768 ignored, clean) = self.dirstate.status(files, match,
767 list_ignored, list_clean)
769 list_ignored, list_clean)
768
770
769 # are we comparing working dir against its parent?
771 # are we comparing working dir against its parent?
770 if compareworking:
772 if compareworking:
771 if lookup:
773 if lookup:
772 # do a full compare of any files that might have changed
774 # do a full compare of any files that might have changed
773 mf2 = mfmatches(self.dirstate.parents()[0])
775 mf2 = mfmatches(self.dirstate.parents()[0])
774 for f in lookup:
776 for f in lookup:
775 if fcmp(f, mf2):
777 if fcmp(f, mf2):
776 modified.append(f)
778 modified.append(f)
777 else:
779 else:
778 clean.append(f)
780 clean.append(f)
779 if wlock is not None:
781 if wlock is not None:
780 self.dirstate.update([f], "n")
782 self.dirstate.update([f], "n")
781 else:
783 else:
782 # we are comparing working dir against non-parent
784 # we are comparing working dir against non-parent
783 # generate a pseudo-manifest for the working dir
785 # generate a pseudo-manifest for the working dir
784 # XXX: create it in dirstate.py ?
786 # XXX: create it in dirstate.py ?
785 mf2 = mfmatches(self.dirstate.parents()[0])
787 mf2 = mfmatches(self.dirstate.parents()[0])
786 for f in lookup + modified + added:
788 for f in lookup + modified + added:
787 mf2[f] = ""
789 mf2[f] = ""
788 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
790 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
789 for f in removed:
791 for f in removed:
790 if f in mf2:
792 if f in mf2:
791 del mf2[f]
793 del mf2[f]
792 else:
794 else:
793 # we are comparing two revisions
795 # we are comparing two revisions
794 mf2 = mfmatches(node2)
796 mf2 = mfmatches(node2)
795
797
796 if not compareworking:
798 if not compareworking:
797 # flush lists from dirstate before comparing manifests
799 # flush lists from dirstate before comparing manifests
798 modified, added, clean = [], [], []
800 modified, added, clean = [], [], []
799
801
800 # make sure to sort the files so we talk to the disk in a
802 # make sure to sort the files so we talk to the disk in a
801 # reasonable order
803 # reasonable order
802 mf2keys = mf2.keys()
804 mf2keys = mf2.keys()
803 mf2keys.sort()
805 mf2keys.sort()
804 for fn in mf2keys:
806 for fn in mf2keys:
805 if mf1.has_key(fn):
807 if mf1.has_key(fn):
806 if mf1.flags(fn) != mf2.flags(fn) or \
808 if mf1.flags(fn) != mf2.flags(fn) or \
807 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
809 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
808 modified.append(fn)
810 modified.append(fn)
809 elif list_clean:
811 elif list_clean:
810 clean.append(fn)
812 clean.append(fn)
811 del mf1[fn]
813 del mf1[fn]
812 else:
814 else:
813 added.append(fn)
815 added.append(fn)
814
816
815 removed = mf1.keys()
817 removed = mf1.keys()
816
818
817 # sort and return results:
819 # sort and return results:
818 for l in modified, added, removed, deleted, unknown, ignored, clean:
820 for l in modified, added, removed, deleted, unknown, ignored, clean:
819 l.sort()
821 l.sort()
820 return (modified, added, removed, deleted, unknown, ignored, clean)
822 return (modified, added, removed, deleted, unknown, ignored, clean)
821
823
822 def add(self, list, wlock=None):
824 def add(self, list, wlock=None):
823 if not wlock:
825 if not wlock:
824 wlock = self.wlock()
826 wlock = self.wlock()
825 for f in list:
827 for f in list:
826 p = self.wjoin(f)
828 p = self.wjoin(f)
827 if not os.path.exists(p):
829 if not os.path.exists(p):
828 self.ui.warn(_("%s does not exist!\n") % f)
830 self.ui.warn(_("%s does not exist!\n") % f)
829 elif not os.path.isfile(p):
831 elif not os.path.isfile(p):
830 self.ui.warn(_("%s not added: only files supported currently\n")
832 self.ui.warn(_("%s not added: only files supported currently\n")
831 % f)
833 % f)
832 elif self.dirstate.state(f) in 'an':
834 elif self.dirstate.state(f) in 'an':
833 self.ui.warn(_("%s already tracked!\n") % f)
835 self.ui.warn(_("%s already tracked!\n") % f)
834 else:
836 else:
835 self.dirstate.update([f], "a")
837 self.dirstate.update([f], "a")
836
838
837 def forget(self, list, wlock=None):
839 def forget(self, list, wlock=None):
838 if not wlock:
840 if not wlock:
839 wlock = self.wlock()
841 wlock = self.wlock()
840 for f in list:
842 for f in list:
841 if self.dirstate.state(f) not in 'ai':
843 if self.dirstate.state(f) not in 'ai':
842 self.ui.warn(_("%s not added!\n") % f)
844 self.ui.warn(_("%s not added!\n") % f)
843 else:
845 else:
844 self.dirstate.forget([f])
846 self.dirstate.forget([f])
845
847
846 def remove(self, list, unlink=False, wlock=None):
848 def remove(self, list, unlink=False, wlock=None):
847 if unlink:
849 if unlink:
848 for f in list:
850 for f in list:
849 try:
851 try:
850 util.unlink(self.wjoin(f))
852 util.unlink(self.wjoin(f))
851 except OSError, inst:
853 except OSError, inst:
852 if inst.errno != errno.ENOENT:
854 if inst.errno != errno.ENOENT:
853 raise
855 raise
854 if not wlock:
856 if not wlock:
855 wlock = self.wlock()
857 wlock = self.wlock()
856 for f in list:
858 for f in list:
857 p = self.wjoin(f)
859 p = self.wjoin(f)
858 if os.path.exists(p):
860 if os.path.exists(p):
859 self.ui.warn(_("%s still exists!\n") % f)
861 self.ui.warn(_("%s still exists!\n") % f)
860 elif self.dirstate.state(f) == 'a':
862 elif self.dirstate.state(f) == 'a':
861 self.dirstate.forget([f])
863 self.dirstate.forget([f])
862 elif f not in self.dirstate:
864 elif f not in self.dirstate:
863 self.ui.warn(_("%s not tracked!\n") % f)
865 self.ui.warn(_("%s not tracked!\n") % f)
864 else:
866 else:
865 self.dirstate.update([f], "r")
867 self.dirstate.update([f], "r")
866
868
867 def undelete(self, list, wlock=None):
869 def undelete(self, list, wlock=None):
868 p = self.dirstate.parents()[0]
870 p = self.dirstate.parents()[0]
869 mn = self.changelog.read(p)[0]
871 mn = self.changelog.read(p)[0]
870 m = self.manifest.read(mn)
872 m = self.manifest.read(mn)
871 if not wlock:
873 if not wlock:
872 wlock = self.wlock()
874 wlock = self.wlock()
873 for f in list:
875 for f in list:
874 if self.dirstate.state(f) not in "r":
876 if self.dirstate.state(f) not in "r":
875 self.ui.warn("%s not removed!\n" % f)
877 self.ui.warn("%s not removed!\n" % f)
876 else:
878 else:
877 t = self.file(f).read(m[f])
879 t = self.file(f).read(m[f])
878 self.wwrite(f, t)
880 self.wwrite(f, t)
879 util.set_exec(self.wjoin(f), m.execf(f))
881 util.set_exec(self.wjoin(f), m.execf(f))
880 self.dirstate.update([f], "n")
882 self.dirstate.update([f], "n")
881
883
882 def copy(self, source, dest, wlock=None):
884 def copy(self, source, dest, wlock=None):
883 p = self.wjoin(dest)
885 p = self.wjoin(dest)
884 if not os.path.exists(p):
886 if not os.path.exists(p):
885 self.ui.warn(_("%s does not exist!\n") % dest)
887 self.ui.warn(_("%s does not exist!\n") % dest)
886 elif not os.path.isfile(p):
888 elif not os.path.isfile(p):
887 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
889 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
888 else:
890 else:
889 if not wlock:
891 if not wlock:
890 wlock = self.wlock()
892 wlock = self.wlock()
891 if self.dirstate.state(dest) == '?':
893 if self.dirstate.state(dest) == '?':
892 self.dirstate.update([dest], "a")
894 self.dirstate.update([dest], "a")
893 self.dirstate.copy(source, dest)
895 self.dirstate.copy(source, dest)
894
896
895 def heads(self, start=None):
897 def heads(self, start=None):
896 heads = self.changelog.heads(start)
898 heads = self.changelog.heads(start)
897 # sort the output in rev descending order
899 # sort the output in rev descending order
898 heads = [(-self.changelog.rev(h), h) for h in heads]
900 heads = [(-self.changelog.rev(h), h) for h in heads]
899 heads.sort()
901 heads.sort()
900 return [n for (r, n) in heads]
902 return [n for (r, n) in heads]
901
903
902 # branchlookup returns a dict giving a list of branches for
904 # branchlookup returns a dict giving a list of branches for
903 # each head. A branch is defined as the tag of a node or
905 # each head. A branch is defined as the tag of a node or
904 # the branch of the node's parents. If a node has multiple
906 # the branch of the node's parents. If a node has multiple
905 # branch tags, tags are eliminated if they are visible from other
907 # branch tags, tags are eliminated if they are visible from other
906 # branch tags.
908 # branch tags.
907 #
909 #
908 # So, for this graph: a->b->c->d->e
910 # So, for this graph: a->b->c->d->e
909 # \ /
911 # \ /
910 # aa -----/
912 # aa -----/
911 # a has tag 2.6.12
913 # a has tag 2.6.12
912 # d has tag 2.6.13
914 # d has tag 2.6.13
913 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
915 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
914 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
916 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
915 # from the list.
917 # from the list.
916 #
918 #
917 # It is possible that more than one head will have the same branch tag.
919 # It is possible that more than one head will have the same branch tag.
918 # callers need to check the result for multiple heads under the same
920 # callers need to check the result for multiple heads under the same
919 # branch tag if that is a problem for them (ie checkout of a specific
921 # branch tag if that is a problem for them (ie checkout of a specific
920 # branch).
922 # branch).
921 #
923 #
922 # passing in a specific branch will limit the depth of the search
924 # passing in a specific branch will limit the depth of the search
923 # through the parents. It won't limit the branches returned in the
925 # through the parents. It won't limit the branches returned in the
924 # result though.
926 # result though.
925 def branchlookup(self, heads=None, branch=None):
927 def branchlookup(self, heads=None, branch=None):
926 if not heads:
928 if not heads:
927 heads = self.heads()
929 heads = self.heads()
928 headt = [ h for h in heads ]
930 headt = [ h for h in heads ]
929 chlog = self.changelog
931 chlog = self.changelog
930 branches = {}
932 branches = {}
931 merges = []
933 merges = []
932 seenmerge = {}
934 seenmerge = {}
933
935
934 # traverse the tree once for each head, recording in the branches
936 # traverse the tree once for each head, recording in the branches
935 # dict which tags are visible from this head. The branches
937 # dict which tags are visible from this head. The branches
936 # dict also records which tags are visible from each tag
938 # dict also records which tags are visible from each tag
937 # while we traverse.
939 # while we traverse.
938 while headt or merges:
940 while headt or merges:
939 if merges:
941 if merges:
940 n, found = merges.pop()
942 n, found = merges.pop()
941 visit = [n]
943 visit = [n]
942 else:
944 else:
943 h = headt.pop()
945 h = headt.pop()
944 visit = [h]
946 visit = [h]
945 found = [h]
947 found = [h]
946 seen = {}
948 seen = {}
947 while visit:
949 while visit:
948 n = visit.pop()
950 n = visit.pop()
949 if n in seen:
951 if n in seen:
950 continue
952 continue
951 pp = chlog.parents(n)
953 pp = chlog.parents(n)
952 tags = self.nodetags(n)
954 tags = self.nodetags(n)
953 if tags:
955 if tags:
954 for x in tags:
956 for x in tags:
955 if x == 'tip':
957 if x == 'tip':
956 continue
958 continue
957 for f in found:
959 for f in found:
958 branches.setdefault(f, {})[n] = 1
960 branches.setdefault(f, {})[n] = 1
959 branches.setdefault(n, {})[n] = 1
961 branches.setdefault(n, {})[n] = 1
960 break
962 break
961 if n not in found:
963 if n not in found:
962 found.append(n)
964 found.append(n)
963 if branch in tags:
965 if branch in tags:
964 continue
966 continue
965 seen[n] = 1
967 seen[n] = 1
966 if pp[1] != nullid and n not in seenmerge:
968 if pp[1] != nullid and n not in seenmerge:
967 merges.append((pp[1], [x for x in found]))
969 merges.append((pp[1], [x for x in found]))
968 seenmerge[n] = 1
970 seenmerge[n] = 1
969 if pp[0] != nullid:
971 if pp[0] != nullid:
970 visit.append(pp[0])
972 visit.append(pp[0])
971 # traverse the branches dict, eliminating branch tags from each
973 # traverse the branches dict, eliminating branch tags from each
972 # head that are visible from another branch tag for that head.
974 # head that are visible from another branch tag for that head.
973 out = {}
975 out = {}
974 viscache = {}
976 viscache = {}
975 for h in heads:
977 for h in heads:
976 def visible(node):
978 def visible(node):
977 if node in viscache:
979 if node in viscache:
978 return viscache[node]
980 return viscache[node]
979 ret = {}
981 ret = {}
980 visit = [node]
982 visit = [node]
981 while visit:
983 while visit:
982 x = visit.pop()
984 x = visit.pop()
983 if x in viscache:
985 if x in viscache:
984 ret.update(viscache[x])
986 ret.update(viscache[x])
985 elif x not in ret:
987 elif x not in ret:
986 ret[x] = 1
988 ret[x] = 1
987 if x in branches:
989 if x in branches:
988 visit[len(visit):] = branches[x].keys()
990 visit[len(visit):] = branches[x].keys()
989 viscache[node] = ret
991 viscache[node] = ret
990 return ret
992 return ret
991 if h not in branches:
993 if h not in branches:
992 continue
994 continue
993 # O(n^2), but somewhat limited. This only searches the
995 # O(n^2), but somewhat limited. This only searches the
994 # tags visible from a specific head, not all the tags in the
996 # tags visible from a specific head, not all the tags in the
995 # whole repo.
997 # whole repo.
996 for b in branches[h]:
998 for b in branches[h]:
997 vis = False
999 vis = False
998 for bb in branches[h].keys():
1000 for bb in branches[h].keys():
999 if b != bb:
1001 if b != bb:
1000 if b in visible(bb):
1002 if b in visible(bb):
1001 vis = True
1003 vis = True
1002 break
1004 break
1003 if not vis:
1005 if not vis:
1004 l = out.setdefault(h, [])
1006 l = out.setdefault(h, [])
1005 l[len(l):] = self.nodetags(b)
1007 l[len(l):] = self.nodetags(b)
1006 return out
1008 return out
1007
1009
1008 def branches(self, nodes):
1010 def branches(self, nodes):
1009 if not nodes:
1011 if not nodes:
1010 nodes = [self.changelog.tip()]
1012 nodes = [self.changelog.tip()]
1011 b = []
1013 b = []
1012 for n in nodes:
1014 for n in nodes:
1013 t = n
1015 t = n
1014 while 1:
1016 while 1:
1015 p = self.changelog.parents(n)
1017 p = self.changelog.parents(n)
1016 if p[1] != nullid or p[0] == nullid:
1018 if p[1] != nullid or p[0] == nullid:
1017 b.append((t, n, p[0], p[1]))
1019 b.append((t, n, p[0], p[1]))
1018 break
1020 break
1019 n = p[0]
1021 n = p[0]
1020 return b
1022 return b
1021
1023
1022 def between(self, pairs):
1024 def between(self, pairs):
1023 r = []
1025 r = []
1024
1026
1025 for top, bottom in pairs:
1027 for top, bottom in pairs:
1026 n, l, i = top, [], 0
1028 n, l, i = top, [], 0
1027 f = 1
1029 f = 1
1028
1030
1029 while n != bottom:
1031 while n != bottom:
1030 p = self.changelog.parents(n)[0]
1032 p = self.changelog.parents(n)[0]
1031 if i == f:
1033 if i == f:
1032 l.append(n)
1034 l.append(n)
1033 f = f * 2
1035 f = f * 2
1034 n = p
1036 n = p
1035 i += 1
1037 i += 1
1036
1038
1037 r.append(l)
1039 r.append(l)
1038
1040
1039 return r
1041 return r
1040
1042
1041 def findincoming(self, remote, base=None, heads=None, force=False):
1043 def findincoming(self, remote, base=None, heads=None, force=False):
1042 """Return list of roots of the subsets of missing nodes from remote
1044 """Return list of roots of the subsets of missing nodes from remote
1043
1045
1044 If base dict is specified, assume that these nodes and their parents
1046 If base dict is specified, assume that these nodes and their parents
1045 exist on the remote side and that no child of a node of base exists
1047 exist on the remote side and that no child of a node of base exists
1046 in both remote and self.
1048 in both remote and self.
1047 Furthermore base will be updated to include the nodes that exists
1049 Furthermore base will be updated to include the nodes that exists
1048 in self and remote but no children exists in self and remote.
1050 in self and remote but no children exists in self and remote.
1049 If a list of heads is specified, return only nodes which are heads
1051 If a list of heads is specified, return only nodes which are heads
1050 or ancestors of these heads.
1052 or ancestors of these heads.
1051
1053
1052 All the ancestors of base are in self and in remote.
1054 All the ancestors of base are in self and in remote.
1053 All the descendants of the list returned are missing in self.
1055 All the descendants of the list returned are missing in self.
1054 (and so we know that the rest of the nodes are missing in remote, see
1056 (and so we know that the rest of the nodes are missing in remote, see
1055 outgoing)
1057 outgoing)
1056 """
1058 """
1057 m = self.changelog.nodemap
1059 m = self.changelog.nodemap
1058 search = []
1060 search = []
1059 fetch = {}
1061 fetch = {}
1060 seen = {}
1062 seen = {}
1061 seenbranch = {}
1063 seenbranch = {}
1062 if base == None:
1064 if base == None:
1063 base = {}
1065 base = {}
1064
1066
1065 if not heads:
1067 if not heads:
1066 heads = remote.heads()
1068 heads = remote.heads()
1067
1069
1068 if self.changelog.tip() == nullid:
1070 if self.changelog.tip() == nullid:
1069 base[nullid] = 1
1071 base[nullid] = 1
1070 if heads != [nullid]:
1072 if heads != [nullid]:
1071 return [nullid]
1073 return [nullid]
1072 return []
1074 return []
1073
1075
1074 # assume we're closer to the tip than the root
1076 # assume we're closer to the tip than the root
1075 # and start by examining the heads
1077 # and start by examining the heads
1076 self.ui.status(_("searching for changes\n"))
1078 self.ui.status(_("searching for changes\n"))
1077
1079
1078 unknown = []
1080 unknown = []
1079 for h in heads:
1081 for h in heads:
1080 if h not in m:
1082 if h not in m:
1081 unknown.append(h)
1083 unknown.append(h)
1082 else:
1084 else:
1083 base[h] = 1
1085 base[h] = 1
1084
1086
1085 if not unknown:
1087 if not unknown:
1086 return []
1088 return []
1087
1089
1088 req = dict.fromkeys(unknown)
1090 req = dict.fromkeys(unknown)
1089 reqcnt = 0
1091 reqcnt = 0
1090
1092
1091 # search through remote branches
1093 # search through remote branches
1092 # a 'branch' here is a linear segment of history, with four parts:
1094 # a 'branch' here is a linear segment of history, with four parts:
1093 # head, root, first parent, second parent
1095 # head, root, first parent, second parent
1094 # (a branch always has two parents (or none) by definition)
1096 # (a branch always has two parents (or none) by definition)
1095 unknown = remote.branches(unknown)
1097 unknown = remote.branches(unknown)
1096 while unknown:
1098 while unknown:
1097 r = []
1099 r = []
1098 while unknown:
1100 while unknown:
1099 n = unknown.pop(0)
1101 n = unknown.pop(0)
1100 if n[0] in seen:
1102 if n[0] in seen:
1101 continue
1103 continue
1102
1104
1103 self.ui.debug(_("examining %s:%s\n")
1105 self.ui.debug(_("examining %s:%s\n")
1104 % (short(n[0]), short(n[1])))
1106 % (short(n[0]), short(n[1])))
1105 if n[0] == nullid: # found the end of the branch
1107 if n[0] == nullid: # found the end of the branch
1106 pass
1108 pass
1107 elif n in seenbranch:
1109 elif n in seenbranch:
1108 self.ui.debug(_("branch already found\n"))
1110 self.ui.debug(_("branch already found\n"))
1109 continue
1111 continue
1110 elif n[1] and n[1] in m: # do we know the base?
1112 elif n[1] and n[1] in m: # do we know the base?
1111 self.ui.debug(_("found incomplete branch %s:%s\n")
1113 self.ui.debug(_("found incomplete branch %s:%s\n")
1112 % (short(n[0]), short(n[1])))
1114 % (short(n[0]), short(n[1])))
1113 search.append(n) # schedule branch range for scanning
1115 search.append(n) # schedule branch range for scanning
1114 seenbranch[n] = 1
1116 seenbranch[n] = 1
1115 else:
1117 else:
1116 if n[1] not in seen and n[1] not in fetch:
1118 if n[1] not in seen and n[1] not in fetch:
1117 if n[2] in m and n[3] in m:
1119 if n[2] in m and n[3] in m:
1118 self.ui.debug(_("found new changeset %s\n") %
1120 self.ui.debug(_("found new changeset %s\n") %
1119 short(n[1]))
1121 short(n[1]))
1120 fetch[n[1]] = 1 # earliest unknown
1122 fetch[n[1]] = 1 # earliest unknown
1121 for p in n[2:4]:
1123 for p in n[2:4]:
1122 if p in m:
1124 if p in m:
1123 base[p] = 1 # latest known
1125 base[p] = 1 # latest known
1124
1126
1125 for p in n[2:4]:
1127 for p in n[2:4]:
1126 if p not in req and p not in m:
1128 if p not in req and p not in m:
1127 r.append(p)
1129 r.append(p)
1128 req[p] = 1
1130 req[p] = 1
1129 seen[n[0]] = 1
1131 seen[n[0]] = 1
1130
1132
1131 if r:
1133 if r:
1132 reqcnt += 1
1134 reqcnt += 1
1133 self.ui.debug(_("request %d: %s\n") %
1135 self.ui.debug(_("request %d: %s\n") %
1134 (reqcnt, " ".join(map(short, r))))
1136 (reqcnt, " ".join(map(short, r))))
1135 for p in range(0, len(r), 10):
1137 for p in range(0, len(r), 10):
1136 for b in remote.branches(r[p:p+10]):
1138 for b in remote.branches(r[p:p+10]):
1137 self.ui.debug(_("received %s:%s\n") %
1139 self.ui.debug(_("received %s:%s\n") %
1138 (short(b[0]), short(b[1])))
1140 (short(b[0]), short(b[1])))
1139 unknown.append(b)
1141 unknown.append(b)
1140
1142
1141 # do binary search on the branches we found
1143 # do binary search on the branches we found
1142 while search:
1144 while search:
1143 n = search.pop(0)
1145 n = search.pop(0)
1144 reqcnt += 1
1146 reqcnt += 1
1145 l = remote.between([(n[0], n[1])])[0]
1147 l = remote.between([(n[0], n[1])])[0]
1146 l.append(n[1])
1148 l.append(n[1])
1147 p = n[0]
1149 p = n[0]
1148 f = 1
1150 f = 1
1149 for i in l:
1151 for i in l:
1150 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1152 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1151 if i in m:
1153 if i in m:
1152 if f <= 2:
1154 if f <= 2:
1153 self.ui.debug(_("found new branch changeset %s\n") %
1155 self.ui.debug(_("found new branch changeset %s\n") %
1154 short(p))
1156 short(p))
1155 fetch[p] = 1
1157 fetch[p] = 1
1156 base[i] = 1
1158 base[i] = 1
1157 else:
1159 else:
1158 self.ui.debug(_("narrowed branch search to %s:%s\n")
1160 self.ui.debug(_("narrowed branch search to %s:%s\n")
1159 % (short(p), short(i)))
1161 % (short(p), short(i)))
1160 search.append((p, i))
1162 search.append((p, i))
1161 break
1163 break
1162 p, f = i, f * 2
1164 p, f = i, f * 2
1163
1165
1164 # sanity check our fetch list
1166 # sanity check our fetch list
1165 for f in fetch.keys():
1167 for f in fetch.keys():
1166 if f in m:
1168 if f in m:
1167 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1169 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1168
1170
1169 if base.keys() == [nullid]:
1171 if base.keys() == [nullid]:
1170 if force:
1172 if force:
1171 self.ui.warn(_("warning: repository is unrelated\n"))
1173 self.ui.warn(_("warning: repository is unrelated\n"))
1172 else:
1174 else:
1173 raise util.Abort(_("repository is unrelated"))
1175 raise util.Abort(_("repository is unrelated"))
1174
1176
1175 self.ui.debug(_("found new changesets starting at ") +
1177 self.ui.debug(_("found new changesets starting at ") +
1176 " ".join([short(f) for f in fetch]) + "\n")
1178 " ".join([short(f) for f in fetch]) + "\n")
1177
1179
1178 self.ui.debug(_("%d total queries\n") % reqcnt)
1180 self.ui.debug(_("%d total queries\n") % reqcnt)
1179
1181
1180 return fetch.keys()
1182 return fetch.keys()
1181
1183
1182 def findoutgoing(self, remote, base=None, heads=None, force=False):
1184 def findoutgoing(self, remote, base=None, heads=None, force=False):
1183 """Return list of nodes that are roots of subsets not in remote
1185 """Return list of nodes that are roots of subsets not in remote
1184
1186
1185 If base dict is specified, assume that these nodes and their parents
1187 If base dict is specified, assume that these nodes and their parents
1186 exist on the remote side.
1188 exist on the remote side.
1187 If a list of heads is specified, return only nodes which are heads
1189 If a list of heads is specified, return only nodes which are heads
1188 or ancestors of these heads, and return a second element which
1190 or ancestors of these heads, and return a second element which
1189 contains all remote heads which get new children.
1191 contains all remote heads which get new children.
1190 """
1192 """
1191 if base == None:
1193 if base == None:
1192 base = {}
1194 base = {}
1193 self.findincoming(remote, base, heads, force=force)
1195 self.findincoming(remote, base, heads, force=force)
1194
1196
1195 self.ui.debug(_("common changesets up to ")
1197 self.ui.debug(_("common changesets up to ")
1196 + " ".join(map(short, base.keys())) + "\n")
1198 + " ".join(map(short, base.keys())) + "\n")
1197
1199
1198 remain = dict.fromkeys(self.changelog.nodemap)
1200 remain = dict.fromkeys(self.changelog.nodemap)
1199
1201
1200 # prune everything remote has from the tree
1202 # prune everything remote has from the tree
1201 del remain[nullid]
1203 del remain[nullid]
1202 remove = base.keys()
1204 remove = base.keys()
1203 while remove:
1205 while remove:
1204 n = remove.pop(0)
1206 n = remove.pop(0)
1205 if n in remain:
1207 if n in remain:
1206 del remain[n]
1208 del remain[n]
1207 for p in self.changelog.parents(n):
1209 for p in self.changelog.parents(n):
1208 remove.append(p)
1210 remove.append(p)
1209
1211
1210 # find every node whose parents have been pruned
1212 # find every node whose parents have been pruned
1211 subset = []
1213 subset = []
1212 # find every remote head that will get new children
1214 # find every remote head that will get new children
1213 updated_heads = {}
1215 updated_heads = {}
1214 for n in remain:
1216 for n in remain:
1215 p1, p2 = self.changelog.parents(n)
1217 p1, p2 = self.changelog.parents(n)
1216 if p1 not in remain and p2 not in remain:
1218 if p1 not in remain and p2 not in remain:
1217 subset.append(n)
1219 subset.append(n)
1218 if heads:
1220 if heads:
1219 if p1 in heads:
1221 if p1 in heads:
1220 updated_heads[p1] = True
1222 updated_heads[p1] = True
1221 if p2 in heads:
1223 if p2 in heads:
1222 updated_heads[p2] = True
1224 updated_heads[p2] = True
1223
1225
1224 # this is the set of all roots we have to push
1226 # this is the set of all roots we have to push
1225 if heads:
1227 if heads:
1226 return subset, updated_heads.keys()
1228 return subset, updated_heads.keys()
1227 else:
1229 else:
1228 return subset
1230 return subset
1229
1231
1230 def pull(self, remote, heads=None, force=False, lock=None):
1232 def pull(self, remote, heads=None, force=False, lock=None):
1231 mylock = False
1233 mylock = False
1232 if not lock:
1234 if not lock:
1233 lock = self.lock()
1235 lock = self.lock()
1234 mylock = True
1236 mylock = True
1235
1237
1236 try:
1238 try:
1237 fetch = self.findincoming(remote, force=force)
1239 fetch = self.findincoming(remote, force=force)
1238 if fetch == [nullid]:
1240 if fetch == [nullid]:
1239 self.ui.status(_("requesting all changes\n"))
1241 self.ui.status(_("requesting all changes\n"))
1240
1242
1241 if not fetch:
1243 if not fetch:
1242 self.ui.status(_("no changes found\n"))
1244 self.ui.status(_("no changes found\n"))
1243 return 0
1245 return 0
1244
1246
1245 if heads is None:
1247 if heads is None:
1246 cg = remote.changegroup(fetch, 'pull')
1248 cg = remote.changegroup(fetch, 'pull')
1247 else:
1249 else:
1248 if 'changegroupsubset' not in remote.capabilities:
1250 if 'changegroupsubset' not in remote.capabilities:
1249 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1251 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1250 cg = remote.changegroupsubset(fetch, heads, 'pull')
1252 cg = remote.changegroupsubset(fetch, heads, 'pull')
1251 return self.addchangegroup(cg, 'pull', remote.url())
1253 return self.addchangegroup(cg, 'pull', remote.url())
1252 finally:
1254 finally:
1253 if mylock:
1255 if mylock:
1254 lock.release()
1256 lock.release()
1255
1257
1256 def push(self, remote, force=False, revs=None):
1258 def push(self, remote, force=False, revs=None):
1257 # there are two ways to push to remote repo:
1259 # there are two ways to push to remote repo:
1258 #
1260 #
1259 # addchangegroup assumes local user can lock remote
1261 # addchangegroup assumes local user can lock remote
1260 # repo (local filesystem, old ssh servers).
1262 # repo (local filesystem, old ssh servers).
1261 #
1263 #
1262 # unbundle assumes local user cannot lock remote repo (new ssh
1264 # unbundle assumes local user cannot lock remote repo (new ssh
1263 # servers, http servers).
1265 # servers, http servers).
1264
1266
1265 if remote.capable('unbundle'):
1267 if remote.capable('unbundle'):
1266 return self.push_unbundle(remote, force, revs)
1268 return self.push_unbundle(remote, force, revs)
1267 return self.push_addchangegroup(remote, force, revs)
1269 return self.push_addchangegroup(remote, force, revs)
1268
1270
1269 def prepush(self, remote, force, revs):
1271 def prepush(self, remote, force, revs):
1270 base = {}
1272 base = {}
1271 remote_heads = remote.heads()
1273 remote_heads = remote.heads()
1272 inc = self.findincoming(remote, base, remote_heads, force=force)
1274 inc = self.findincoming(remote, base, remote_heads, force=force)
1273 if not force and inc:
1275 if not force and inc:
1274 self.ui.warn(_("abort: unsynced remote changes!\n"))
1276 self.ui.warn(_("abort: unsynced remote changes!\n"))
1275 self.ui.status(_("(did you forget to sync?"
1277 self.ui.status(_("(did you forget to sync?"
1276 " use push -f to force)\n"))
1278 " use push -f to force)\n"))
1277 return None, 1
1279 return None, 1
1278
1280
1279 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1281 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1280 if revs is not None:
1282 if revs is not None:
1281 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1283 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1282 else:
1284 else:
1283 bases, heads = update, self.changelog.heads()
1285 bases, heads = update, self.changelog.heads()
1284
1286
1285 if not bases:
1287 if not bases:
1286 self.ui.status(_("no changes found\n"))
1288 self.ui.status(_("no changes found\n"))
1287 return None, 1
1289 return None, 1
1288 elif not force:
1290 elif not force:
1289 # FIXME we don't properly detect creation of new heads
1291 # FIXME we don't properly detect creation of new heads
1290 # in the push -r case, assume the user knows what he's doing
1292 # in the push -r case, assume the user knows what he's doing
1291 if not revs and len(remote_heads) < len(heads) \
1293 if not revs and len(remote_heads) < len(heads) \
1292 and remote_heads != [nullid]:
1294 and remote_heads != [nullid]:
1293 self.ui.warn(_("abort: push creates new remote branches!\n"))
1295 self.ui.warn(_("abort: push creates new remote branches!\n"))
1294 self.ui.status(_("(did you forget to merge?"
1296 self.ui.status(_("(did you forget to merge?"
1295 " use push -f to force)\n"))
1297 " use push -f to force)\n"))
1296 return None, 1
1298 return None, 1
1297
1299
1298 if revs is None:
1300 if revs is None:
1299 cg = self.changegroup(update, 'push')
1301 cg = self.changegroup(update, 'push')
1300 else:
1302 else:
1301 cg = self.changegroupsubset(update, revs, 'push')
1303 cg = self.changegroupsubset(update, revs, 'push')
1302 return cg, remote_heads
1304 return cg, remote_heads
1303
1305
1304 def push_addchangegroup(self, remote, force, revs):
1306 def push_addchangegroup(self, remote, force, revs):
1305 lock = remote.lock()
1307 lock = remote.lock()
1306
1308
1307 ret = self.prepush(remote, force, revs)
1309 ret = self.prepush(remote, force, revs)
1308 if ret[0] is not None:
1310 if ret[0] is not None:
1309 cg, remote_heads = ret
1311 cg, remote_heads = ret
1310 return remote.addchangegroup(cg, 'push', self.url())
1312 return remote.addchangegroup(cg, 'push', self.url())
1311 return ret[1]
1313 return ret[1]
1312
1314
1313 def push_unbundle(self, remote, force, revs):
1315 def push_unbundle(self, remote, force, revs):
1314 # local repo finds heads on server, finds out what revs it
1316 # local repo finds heads on server, finds out what revs it
1315 # must push. once revs transferred, if server finds it has
1317 # must push. once revs transferred, if server finds it has
1316 # different heads (someone else won commit/push race), server
1318 # different heads (someone else won commit/push race), server
1317 # aborts.
1319 # aborts.
1318
1320
1319 ret = self.prepush(remote, force, revs)
1321 ret = self.prepush(remote, force, revs)
1320 if ret[0] is not None:
1322 if ret[0] is not None:
1321 cg, remote_heads = ret
1323 cg, remote_heads = ret
1322 if force: remote_heads = ['force']
1324 if force: remote_heads = ['force']
1323 return remote.unbundle(cg, remote_heads, 'push')
1325 return remote.unbundle(cg, remote_heads, 'push')
1324 return ret[1]
1326 return ret[1]
1325
1327
1326 def changegroupsubset(self, bases, heads, source):
1328 def changegroupsubset(self, bases, heads, source):
1327 """This function generates a changegroup consisting of all the nodes
1329 """This function generates a changegroup consisting of all the nodes
1328 that are descendents of any of the bases, and ancestors of any of
1330 that are descendents of any of the bases, and ancestors of any of
1329 the heads.
1331 the heads.
1330
1332
1331 It is fairly complex as determining which filenodes and which
1333 It is fairly complex as determining which filenodes and which
1332 manifest nodes need to be included for the changeset to be complete
1334 manifest nodes need to be included for the changeset to be complete
1333 is non-trivial.
1335 is non-trivial.
1334
1336
1335 Another wrinkle is doing the reverse, figuring out which changeset in
1337 Another wrinkle is doing the reverse, figuring out which changeset in
1336 the changegroup a particular filenode or manifestnode belongs to."""
1338 the changegroup a particular filenode or manifestnode belongs to."""
1337
1339
1338 self.hook('preoutgoing', throw=True, source=source)
1340 self.hook('preoutgoing', throw=True, source=source)
1339
1341
1340 # Set up some initial variables
1342 # Set up some initial variables
1341 # Make it easy to refer to self.changelog
1343 # Make it easy to refer to self.changelog
1342 cl = self.changelog
1344 cl = self.changelog
1343 # msng is short for missing - compute the list of changesets in this
1345 # msng is short for missing - compute the list of changesets in this
1344 # changegroup.
1346 # changegroup.
1345 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1347 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1346 # Some bases may turn out to be superfluous, and some heads may be
1348 # Some bases may turn out to be superfluous, and some heads may be
1347 # too. nodesbetween will return the minimal set of bases and heads
1349 # too. nodesbetween will return the minimal set of bases and heads
1348 # necessary to re-create the changegroup.
1350 # necessary to re-create the changegroup.
1349
1351
1350 # Known heads are the list of heads that it is assumed the recipient
1352 # Known heads are the list of heads that it is assumed the recipient
1351 # of this changegroup will know about.
1353 # of this changegroup will know about.
1352 knownheads = {}
1354 knownheads = {}
1353 # We assume that all parents of bases are known heads.
1355 # We assume that all parents of bases are known heads.
1354 for n in bases:
1356 for n in bases:
1355 for p in cl.parents(n):
1357 for p in cl.parents(n):
1356 if p != nullid:
1358 if p != nullid:
1357 knownheads[p] = 1
1359 knownheads[p] = 1
1358 knownheads = knownheads.keys()
1360 knownheads = knownheads.keys()
1359 if knownheads:
1361 if knownheads:
1360 # Now that we know what heads are known, we can compute which
1362 # Now that we know what heads are known, we can compute which
1361 # changesets are known. The recipient must know about all
1363 # changesets are known. The recipient must know about all
1362 # changesets required to reach the known heads from the null
1364 # changesets required to reach the known heads from the null
1363 # changeset.
1365 # changeset.
1364 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1366 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1365 junk = None
1367 junk = None
1366 # Transform the list into an ersatz set.
1368 # Transform the list into an ersatz set.
1367 has_cl_set = dict.fromkeys(has_cl_set)
1369 has_cl_set = dict.fromkeys(has_cl_set)
1368 else:
1370 else:
1369 # If there were no known heads, the recipient cannot be assumed to
1371 # If there were no known heads, the recipient cannot be assumed to
1370 # know about any changesets.
1372 # know about any changesets.
1371 has_cl_set = {}
1373 has_cl_set = {}
1372
1374
1373 # Make it easy to refer to self.manifest
1375 # Make it easy to refer to self.manifest
1374 mnfst = self.manifest
1376 mnfst = self.manifest
1375 # We don't know which manifests are missing yet
1377 # We don't know which manifests are missing yet
1376 msng_mnfst_set = {}
1378 msng_mnfst_set = {}
1377 # Nor do we know which filenodes are missing.
1379 # Nor do we know which filenodes are missing.
1378 msng_filenode_set = {}
1380 msng_filenode_set = {}
1379
1381
1380 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1382 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1381 junk = None
1383 junk = None
1382
1384
1383 # A changeset always belongs to itself, so the changenode lookup
1385 # A changeset always belongs to itself, so the changenode lookup
1384 # function for a changenode is identity.
1386 # function for a changenode is identity.
1385 def identity(x):
1387 def identity(x):
1386 return x
1388 return x
1387
1389
1388 # A function generating function. Sets up an environment for the
1390 # A function generating function. Sets up an environment for the
1389 # inner function.
1391 # inner function.
1390 def cmp_by_rev_func(revlog):
1392 def cmp_by_rev_func(revlog):
1391 # Compare two nodes by their revision number in the environment's
1393 # Compare two nodes by their revision number in the environment's
1392 # revision history. Since the revision number both represents the
1394 # revision history. Since the revision number both represents the
1393 # most efficient order to read the nodes in, and represents a
1395 # most efficient order to read the nodes in, and represents a
1394 # topological sorting of the nodes, this function is often useful.
1396 # topological sorting of the nodes, this function is often useful.
1395 def cmp_by_rev(a, b):
1397 def cmp_by_rev(a, b):
1396 return cmp(revlog.rev(a), revlog.rev(b))
1398 return cmp(revlog.rev(a), revlog.rev(b))
1397 return cmp_by_rev
1399 return cmp_by_rev
1398
1400
1399 # If we determine that a particular file or manifest node must be a
1401 # If we determine that a particular file or manifest node must be a
1400 # node that the recipient of the changegroup will already have, we can
1402 # node that the recipient of the changegroup will already have, we can
1401 # also assume the recipient will have all the parents. This function
1403 # also assume the recipient will have all the parents. This function
1402 # prunes them from the set of missing nodes.
1404 # prunes them from the set of missing nodes.
1403 def prune_parents(revlog, hasset, msngset):
1405 def prune_parents(revlog, hasset, msngset):
1404 haslst = hasset.keys()
1406 haslst = hasset.keys()
1405 haslst.sort(cmp_by_rev_func(revlog))
1407 haslst.sort(cmp_by_rev_func(revlog))
1406 for node in haslst:
1408 for node in haslst:
1407 parentlst = [p for p in revlog.parents(node) if p != nullid]
1409 parentlst = [p for p in revlog.parents(node) if p != nullid]
1408 while parentlst:
1410 while parentlst:
1409 n = parentlst.pop()
1411 n = parentlst.pop()
1410 if n not in hasset:
1412 if n not in hasset:
1411 hasset[n] = 1
1413 hasset[n] = 1
1412 p = [p for p in revlog.parents(n) if p != nullid]
1414 p = [p for p in revlog.parents(n) if p != nullid]
1413 parentlst.extend(p)
1415 parentlst.extend(p)
1414 for n in hasset:
1416 for n in hasset:
1415 msngset.pop(n, None)
1417 msngset.pop(n, None)
1416
1418
1417 # This is a function generating function used to set up an environment
1419 # This is a function generating function used to set up an environment
1418 # for the inner function to execute in.
1420 # for the inner function to execute in.
1419 def manifest_and_file_collector(changedfileset):
1421 def manifest_and_file_collector(changedfileset):
1420 # This is an information gathering function that gathers
1422 # This is an information gathering function that gathers
1421 # information from each changeset node that goes out as part of
1423 # information from each changeset node that goes out as part of
1422 # the changegroup. The information gathered is a list of which
1424 # the changegroup. The information gathered is a list of which
1423 # manifest nodes are potentially required (the recipient may
1425 # manifest nodes are potentially required (the recipient may
1424 # already have them) and total list of all files which were
1426 # already have them) and total list of all files which were
1425 # changed in any changeset in the changegroup.
1427 # changed in any changeset in the changegroup.
1426 #
1428 #
1427 # We also remember the first changenode we saw any manifest
1429 # We also remember the first changenode we saw any manifest
1428 # referenced by so we can later determine which changenode 'owns'
1430 # referenced by so we can later determine which changenode 'owns'
1429 # the manifest.
1431 # the manifest.
1430 def collect_manifests_and_files(clnode):
1432 def collect_manifests_and_files(clnode):
1431 c = cl.read(clnode)
1433 c = cl.read(clnode)
1432 for f in c[3]:
1434 for f in c[3]:
1433 # This is to make sure we only have one instance of each
1435 # This is to make sure we only have one instance of each
1434 # filename string for each filename.
1436 # filename string for each filename.
1435 changedfileset.setdefault(f, f)
1437 changedfileset.setdefault(f, f)
1436 msng_mnfst_set.setdefault(c[0], clnode)
1438 msng_mnfst_set.setdefault(c[0], clnode)
1437 return collect_manifests_and_files
1439 return collect_manifests_and_files
1438
1440
1439 # Figure out which manifest nodes (of the ones we think might be part
1441 # Figure out which manifest nodes (of the ones we think might be part
1440 # of the changegroup) the recipient must know about and remove them
1442 # of the changegroup) the recipient must know about and remove them
1441 # from the changegroup.
1443 # from the changegroup.
1442 def prune_manifests():
1444 def prune_manifests():
1443 has_mnfst_set = {}
1445 has_mnfst_set = {}
1444 for n in msng_mnfst_set:
1446 for n in msng_mnfst_set:
1445 # If a 'missing' manifest thinks it belongs to a changenode
1447 # If a 'missing' manifest thinks it belongs to a changenode
1446 # the recipient is assumed to have, obviously the recipient
1448 # the recipient is assumed to have, obviously the recipient
1447 # must have that manifest.
1449 # must have that manifest.
1448 linknode = cl.node(mnfst.linkrev(n))
1450 linknode = cl.node(mnfst.linkrev(n))
1449 if linknode in has_cl_set:
1451 if linknode in has_cl_set:
1450 has_mnfst_set[n] = 1
1452 has_mnfst_set[n] = 1
1451 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1453 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1452
1454
1453 # Use the information collected in collect_manifests_and_files to say
1455 # Use the information collected in collect_manifests_and_files to say
1454 # which changenode any manifestnode belongs to.
1456 # which changenode any manifestnode belongs to.
1455 def lookup_manifest_link(mnfstnode):
1457 def lookup_manifest_link(mnfstnode):
1456 return msng_mnfst_set[mnfstnode]
1458 return msng_mnfst_set[mnfstnode]
1457
1459
1458 # A function generating function that sets up the initial environment
1460 # A function generating function that sets up the initial environment
1459 # the inner function.
1461 # the inner function.
1460 def filenode_collector(changedfiles):
1462 def filenode_collector(changedfiles):
1461 next_rev = [0]
1463 next_rev = [0]
1462 # This gathers information from each manifestnode included in the
1464 # This gathers information from each manifestnode included in the
1463 # changegroup about which filenodes the manifest node references
1465 # changegroup about which filenodes the manifest node references
1464 # so we can include those in the changegroup too.
1466 # so we can include those in the changegroup too.
1465 #
1467 #
1466 # It also remembers which changenode each filenode belongs to. It
1468 # It also remembers which changenode each filenode belongs to. It
1467 # does this by assuming the a filenode belongs to the changenode
1469 # does this by assuming the a filenode belongs to the changenode
1468 # the first manifest that references it belongs to.
1470 # the first manifest that references it belongs to.
1469 def collect_msng_filenodes(mnfstnode):
1471 def collect_msng_filenodes(mnfstnode):
1470 r = mnfst.rev(mnfstnode)
1472 r = mnfst.rev(mnfstnode)
1471 if r == next_rev[0]:
1473 if r == next_rev[0]:
1472 # If the last rev we looked at was the one just previous,
1474 # If the last rev we looked at was the one just previous,
1473 # we only need to see a diff.
1475 # we only need to see a diff.
1474 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1476 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1475 # For each line in the delta
1477 # For each line in the delta
1476 for dline in delta.splitlines():
1478 for dline in delta.splitlines():
1477 # get the filename and filenode for that line
1479 # get the filename and filenode for that line
1478 f, fnode = dline.split('\0')
1480 f, fnode = dline.split('\0')
1479 fnode = bin(fnode[:40])
1481 fnode = bin(fnode[:40])
1480 f = changedfiles.get(f, None)
1482 f = changedfiles.get(f, None)
1481 # And if the file is in the list of files we care
1483 # And if the file is in the list of files we care
1482 # about.
1484 # about.
1483 if f is not None:
1485 if f is not None:
1484 # Get the changenode this manifest belongs to
1486 # Get the changenode this manifest belongs to
1485 clnode = msng_mnfst_set[mnfstnode]
1487 clnode = msng_mnfst_set[mnfstnode]
1486 # Create the set of filenodes for the file if
1488 # Create the set of filenodes for the file if
1487 # there isn't one already.
1489 # there isn't one already.
1488 ndset = msng_filenode_set.setdefault(f, {})
1490 ndset = msng_filenode_set.setdefault(f, {})
1489 # And set the filenode's changelog node to the
1491 # And set the filenode's changelog node to the
1490 # manifest's if it hasn't been set already.
1492 # manifest's if it hasn't been set already.
1491 ndset.setdefault(fnode, clnode)
1493 ndset.setdefault(fnode, clnode)
1492 else:
1494 else:
1493 # Otherwise we need a full manifest.
1495 # Otherwise we need a full manifest.
1494 m = mnfst.read(mnfstnode)
1496 m = mnfst.read(mnfstnode)
1495 # For every file in we care about.
1497 # For every file in we care about.
1496 for f in changedfiles:
1498 for f in changedfiles:
1497 fnode = m.get(f, None)
1499 fnode = m.get(f, None)
1498 # If it's in the manifest
1500 # If it's in the manifest
1499 if fnode is not None:
1501 if fnode is not None:
1500 # See comments above.
1502 # See comments above.
1501 clnode = msng_mnfst_set[mnfstnode]
1503 clnode = msng_mnfst_set[mnfstnode]
1502 ndset = msng_filenode_set.setdefault(f, {})
1504 ndset = msng_filenode_set.setdefault(f, {})
1503 ndset.setdefault(fnode, clnode)
1505 ndset.setdefault(fnode, clnode)
1504 # Remember the revision we hope to see next.
1506 # Remember the revision we hope to see next.
1505 next_rev[0] = r + 1
1507 next_rev[0] = r + 1
1506 return collect_msng_filenodes
1508 return collect_msng_filenodes
1507
1509
1508 # We have a list of filenodes we think we need for a file, lets remove
1510 # We have a list of filenodes we think we need for a file, lets remove
1509 # all those we now the recipient must have.
1511 # all those we now the recipient must have.
1510 def prune_filenodes(f, filerevlog):
1512 def prune_filenodes(f, filerevlog):
1511 msngset = msng_filenode_set[f]
1513 msngset = msng_filenode_set[f]
1512 hasset = {}
1514 hasset = {}
1513 # If a 'missing' filenode thinks it belongs to a changenode we
1515 # If a 'missing' filenode thinks it belongs to a changenode we
1514 # assume the recipient must have, then the recipient must have
1516 # assume the recipient must have, then the recipient must have
1515 # that filenode.
1517 # that filenode.
1516 for n in msngset:
1518 for n in msngset:
1517 clnode = cl.node(filerevlog.linkrev(n))
1519 clnode = cl.node(filerevlog.linkrev(n))
1518 if clnode in has_cl_set:
1520 if clnode in has_cl_set:
1519 hasset[n] = 1
1521 hasset[n] = 1
1520 prune_parents(filerevlog, hasset, msngset)
1522 prune_parents(filerevlog, hasset, msngset)
1521
1523
1522 # A function generator function that sets up the a context for the
1524 # A function generator function that sets up the a context for the
1523 # inner function.
1525 # inner function.
1524 def lookup_filenode_link_func(fname):
1526 def lookup_filenode_link_func(fname):
1525 msngset = msng_filenode_set[fname]
1527 msngset = msng_filenode_set[fname]
1526 # Lookup the changenode the filenode belongs to.
1528 # Lookup the changenode the filenode belongs to.
1527 def lookup_filenode_link(fnode):
1529 def lookup_filenode_link(fnode):
1528 return msngset[fnode]
1530 return msngset[fnode]
1529 return lookup_filenode_link
1531 return lookup_filenode_link
1530
1532
1531 # Now that we have all theses utility functions to help out and
1533 # Now that we have all theses utility functions to help out and
1532 # logically divide up the task, generate the group.
1534 # logically divide up the task, generate the group.
1533 def gengroup():
1535 def gengroup():
1534 # The set of changed files starts empty.
1536 # The set of changed files starts empty.
1535 changedfiles = {}
1537 changedfiles = {}
1536 # Create a changenode group generator that will call our functions
1538 # Create a changenode group generator that will call our functions
1537 # back to lookup the owning changenode and collect information.
1539 # back to lookup the owning changenode and collect information.
1538 group = cl.group(msng_cl_lst, identity,
1540 group = cl.group(msng_cl_lst, identity,
1539 manifest_and_file_collector(changedfiles))
1541 manifest_and_file_collector(changedfiles))
1540 for chnk in group:
1542 for chnk in group:
1541 yield chnk
1543 yield chnk
1542
1544
1543 # The list of manifests has been collected by the generator
1545 # The list of manifests has been collected by the generator
1544 # calling our functions back.
1546 # calling our functions back.
1545 prune_manifests()
1547 prune_manifests()
1546 msng_mnfst_lst = msng_mnfst_set.keys()
1548 msng_mnfst_lst = msng_mnfst_set.keys()
1547 # Sort the manifestnodes by revision number.
1549 # Sort the manifestnodes by revision number.
1548 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1550 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1549 # Create a generator for the manifestnodes that calls our lookup
1551 # Create a generator for the manifestnodes that calls our lookup
1550 # and data collection functions back.
1552 # and data collection functions back.
1551 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1553 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1552 filenode_collector(changedfiles))
1554 filenode_collector(changedfiles))
1553 for chnk in group:
1555 for chnk in group:
1554 yield chnk
1556 yield chnk
1555
1557
1556 # These are no longer needed, dereference and toss the memory for
1558 # These are no longer needed, dereference and toss the memory for
1557 # them.
1559 # them.
1558 msng_mnfst_lst = None
1560 msng_mnfst_lst = None
1559 msng_mnfst_set.clear()
1561 msng_mnfst_set.clear()
1560
1562
1561 changedfiles = changedfiles.keys()
1563 changedfiles = changedfiles.keys()
1562 changedfiles.sort()
1564 changedfiles.sort()
1563 # Go through all our files in order sorted by name.
1565 # Go through all our files in order sorted by name.
1564 for fname in changedfiles:
1566 for fname in changedfiles:
1565 filerevlog = self.file(fname)
1567 filerevlog = self.file(fname)
1566 # Toss out the filenodes that the recipient isn't really
1568 # Toss out the filenodes that the recipient isn't really
1567 # missing.
1569 # missing.
1568 if msng_filenode_set.has_key(fname):
1570 if msng_filenode_set.has_key(fname):
1569 prune_filenodes(fname, filerevlog)
1571 prune_filenodes(fname, filerevlog)
1570 msng_filenode_lst = msng_filenode_set[fname].keys()
1572 msng_filenode_lst = msng_filenode_set[fname].keys()
1571 else:
1573 else:
1572 msng_filenode_lst = []
1574 msng_filenode_lst = []
1573 # If any filenodes are left, generate the group for them,
1575 # If any filenodes are left, generate the group for them,
1574 # otherwise don't bother.
1576 # otherwise don't bother.
1575 if len(msng_filenode_lst) > 0:
1577 if len(msng_filenode_lst) > 0:
1576 yield changegroup.genchunk(fname)
1578 yield changegroup.genchunk(fname)
1577 # Sort the filenodes by their revision #
1579 # Sort the filenodes by their revision #
1578 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1580 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1579 # Create a group generator and only pass in a changenode
1581 # Create a group generator and only pass in a changenode
1580 # lookup function as we need to collect no information
1582 # lookup function as we need to collect no information
1581 # from filenodes.
1583 # from filenodes.
1582 group = filerevlog.group(msng_filenode_lst,
1584 group = filerevlog.group(msng_filenode_lst,
1583 lookup_filenode_link_func(fname))
1585 lookup_filenode_link_func(fname))
1584 for chnk in group:
1586 for chnk in group:
1585 yield chnk
1587 yield chnk
1586 if msng_filenode_set.has_key(fname):
1588 if msng_filenode_set.has_key(fname):
1587 # Don't need this anymore, toss it to free memory.
1589 # Don't need this anymore, toss it to free memory.
1588 del msng_filenode_set[fname]
1590 del msng_filenode_set[fname]
1589 # Signal that no more groups are left.
1591 # Signal that no more groups are left.
1590 yield changegroup.closechunk()
1592 yield changegroup.closechunk()
1591
1593
1592 if msng_cl_lst:
1594 if msng_cl_lst:
1593 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1595 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1594
1596
1595 return util.chunkbuffer(gengroup())
1597 return util.chunkbuffer(gengroup())
1596
1598
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes -- changelog nodes the recipient is assumed to already
        have; everything after them (per cl.nodesbetween) is bundled.
        source -- opaque tag passed through to the preoutgoing/outgoing hooks.

        Returns a util.chunkbuffer wrapping a generator of changegroup
        chunks: changelog group, manifest group, then one group per changed
        file (each preceded by a filename chunk), terminated by a close chunk.
        """

        # Let hooks veto the operation before any work is done.
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # The outgoing changesets: everything between basenodes and tip.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Outgoing changelog revision numbers, kept as dict keys purely for
        # fast membership tests (values are ignored).
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # Changelog entries link to themselves.
            return x

        def gennodelst(revlog):
            # Yield the nodes of `revlog` whose linked changeset is in the
            # outgoing set, in revision (storage) order.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Build a per-changeset callback that records the files each
            # sent changeset touches (c[3] is the changelog entry's file
            # list).
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Build a lookup function mapping a node of `revlog` back to
            # the changelog node that introduced it.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # 1) the changelog group; as a side effect the collector
            # callback fills in changedfiles.
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # 2) the manifest group.
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # 3) one group per changed file, each preceded by a chunk
            # carrying the file name; files with no outgoing filenodes are
            # skipped entirely.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so the emptiness test does not consume it.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1662
1664
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source -- stream of changegroup chunks (as produced by
        changegroup()/changegroupsubset()); falsy means nothing to add.
        srctype, url -- passed through to the pre/post hooks so they can
        tell where the changes came from.
        """

        def csmap(x):
            # Link function for new changelog entries: the next entry's
            # revision number is the current count.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Link function for manifest/filelog entries: map the owning
            # changelog node to its revision number.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1  # tip revision before adding the group
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1  # tip revision after adding the group
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # Each file group is preceded by a chunk holding the file
                # name; an empty chunk terminates the stream.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # Flush the data accumulated in the append-only temp file so
            # it becomes part of the real changelog.
            cl.writedata()
        finally:
            # Always dispose of the appendfile wrapper, even on error.
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still raise and abort the whole
            # transaction before it is committed below.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # One 'incoming' hook invocation per newly added changeset.
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1758
1760
1759
1761
1760 def stream_in(self, remote):
1762 def stream_in(self, remote):
1761 fp = remote.stream_out()
1763 fp = remote.stream_out()
1762 resp = int(fp.readline())
1764 resp = int(fp.readline())
1763 if resp != 0:
1765 if resp != 0:
1764 raise util.Abort(_('operation forbidden by server'))
1766 raise util.Abort(_('operation forbidden by server'))
1765 self.ui.status(_('streaming all changes\n'))
1767 self.ui.status(_('streaming all changes\n'))
1766 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1768 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1767 self.ui.status(_('%d files to transfer, %s of data\n') %
1769 self.ui.status(_('%d files to transfer, %s of data\n') %
1768 (total_files, util.bytecount(total_bytes)))
1770 (total_files, util.bytecount(total_bytes)))
1769 start = time.time()
1771 start = time.time()
1770 for i in xrange(total_files):
1772 for i in xrange(total_files):
1771 name, size = fp.readline().split('\0', 1)
1773 name, size = fp.readline().split('\0', 1)
1772 size = int(size)
1774 size = int(size)
1773 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1775 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1774 ofp = self.opener(name, 'w')
1776 ofp = self.opener(name, 'w')
1775 for chunk in util.filechunkiter(fp, limit=size):
1777 for chunk in util.filechunkiter(fp, limit=size):
1776 ofp.write(chunk)
1778 ofp.write(chunk)
1777 ofp.close()
1779 ofp.close()
1778 elapsed = time.time() - start
1780 elapsed = time.time() - start
1779 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1781 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1780 (util.bytecount(total_bytes), elapsed,
1782 (util.bytecount(total_bytes), elapsed,
1781 util.bytecount(total_bytes / elapsed)))
1783 util.bytecount(total_bytes / elapsed)))
1782 self.reload()
1784 self.reload()
1783 return len(self.heads()) + 1
1785 return len(self.heads()) + 1
1784
1786
1785 def clone(self, remote, heads=[], stream=False):
1787 def clone(self, remote, heads=[], stream=False):
1786 '''clone remote repository.
1788 '''clone remote repository.
1787
1789
1788 keyword arguments:
1790 keyword arguments:
1789 heads: list of revs to clone (forces use of pull)
1791 heads: list of revs to clone (forces use of pull)
1790 stream: use streaming clone if possible'''
1792 stream: use streaming clone if possible'''
1791
1793
1792 # now, all clients that can request uncompressed clones can
1794 # now, all clients that can request uncompressed clones can
1793 # read repo formats supported by all servers that can serve
1795 # read repo formats supported by all servers that can serve
1794 # them.
1796 # them.
1795
1797
1796 # if revlog format changes, client will have to check version
1798 # if revlog format changes, client will have to check version
1797 # and format flags on "stream" capability, and use
1799 # and format flags on "stream" capability, and use
1798 # uncompressed only if compatible.
1800 # uncompressed only if compatible.
1799
1801
1800 if stream and not heads and remote.capable('stream'):
1802 if stream and not heads and remote.capable('stream'):
1801 return self.stream_in(remote)
1803 return self.stream_in(remote)
1802 return self.pull(remote, heads)
1804 return self.pull(remote, heads)
1803
1805
def aftertrans(base):
    """Return a callback that renames the journal files under *base* to
    their undo names once a transaction completes.

    A plain closure (not a repository method) is returned on purpose, to
    avoid circular references so destructors work.
    """
    journal_dir = base

    def rename_journal():
        util.rename(os.path.join(journal_dir, "journal"),
                    os.path.join(journal_dir, "undo"))
        util.rename(os.path.join(journal_dir, "journal.dirstate"),
                    os.path.join(journal_dir, "undo.dirstate"))

    return rename_journal
1812
1814
def instance(ui, path, create):
    """repo.repository factory hook: open (or create, when *create* is
    set) the local repository at *path*, stripping any 'file:' scheme
    prefix first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1815
1817
def islocal(path):
    """Protocol hook: repositories handled by this module are always
    local, whatever *path* is."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now