##// END OF EJS Templates
branchtags: use changectx...
Matt Mackall -
r3439:a7ef6b6c default
parent child Browse files
Show More
@@ -1,1814 +1,1813
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    """A repository stored on the local filesystem (.hg directory)."""
    # protocol capabilities advertised to clients; a plain local
    # repository advertises none
    capabilities = ()
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
40 os.mkdir(self.join("data"))
41 else:
41 else:
42 raise repo.RepoError(_("repository %s not found") % path)
42 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
43 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
44 raise repo.RepoError(_("repository %s already exists") % path)
45
45
46 self.root = os.path.abspath(path)
46 self.root = os.path.abspath(path)
47 self.origroot = path
47 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
48 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
49 self.opener = util.opener(self.path)
50 self.wopener = util.opener(self.root)
50 self.wopener = util.opener(self.root)
51
51
52 try:
52 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
53 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
54 except IOError:
55 pass
55 pass
56
56
57 v = self.ui.configrevlog()
57 v = self.ui.configrevlog()
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
60 fl = v.get('flags', None)
61 flags = 0
61 flags = 0
62 if fl != None:
62 if fl != None:
63 for x in fl.split():
63 for x in fl.split():
64 flags |= revlog.flagstr(x)
64 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
65 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
66 flags = revlog.REVLOG_DEFAULT_FLAGS
67
67
68 v = self.revlogversion | flags
68 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.opener, v)
69 self.manifest = manifest.manifest(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
71
71
72 # the changelog might not have the inline index flag
72 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
73 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
74 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just version from the changelog
75 # Otherwise, just version from the changelog
76 v = self.changelog.version
76 v = self.changelog.version
77 if v == self.revlogversion:
77 if v == self.revlogversion:
78 v |= flags
78 v |= flags
79 self.revlogversion = v
79 self.revlogversion = v
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.branchcache = None
82 self.branchcache = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.encodepats = None
84 self.encodepats = None
85 self.decodepats = None
85 self.decodepats = None
86 self.transhandle = None
86 self.transhandle = None
87
87
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
90 def url(self):
90 def url(self):
91 return 'file:' + self.root
91 return 'file:' + self.root
92
92
93 def hook(self, name, throw=False, **args):
93 def hook(self, name, throw=False, **args):
94 def callhook(hname, funcname):
94 def callhook(hname, funcname):
95 '''call python hook. hook is callable object, looked up as
95 '''call python hook. hook is callable object, looked up as
96 name in python module. if callable returns "true", hook
96 name in python module. if callable returns "true", hook
97 fails, else passes. if hook raises exception, treated as
97 fails, else passes. if hook raises exception, treated as
98 hook failure. exception propagates if throw is "true".
98 hook failure. exception propagates if throw is "true".
99
99
100 reason for "true" meaning "hook failed" is so that
100 reason for "true" meaning "hook failed" is so that
101 unmodified commands (e.g. mercurial.commands.update) can
101 unmodified commands (e.g. mercurial.commands.update) can
102 be run as hooks without wrappers to convert return values.'''
102 be run as hooks without wrappers to convert return values.'''
103
103
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 d = funcname.rfind('.')
105 d = funcname.rfind('.')
106 if d == -1:
106 if d == -1:
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 % (hname, funcname))
108 % (hname, funcname))
109 modname = funcname[:d]
109 modname = funcname[:d]
110 try:
110 try:
111 obj = __import__(modname)
111 obj = __import__(modname)
112 except ImportError:
112 except ImportError:
113 try:
113 try:
114 # extensions are loaded with hgext_ prefix
114 # extensions are loaded with hgext_ prefix
115 obj = __import__("hgext_%s" % modname)
115 obj = __import__("hgext_%s" % modname)
116 except ImportError:
116 except ImportError:
117 raise util.Abort(_('%s hook is invalid '
117 raise util.Abort(_('%s hook is invalid '
118 '(import of "%s" failed)') %
118 '(import of "%s" failed)') %
119 (hname, modname))
119 (hname, modname))
120 try:
120 try:
121 for p in funcname.split('.')[1:]:
121 for p in funcname.split('.')[1:]:
122 obj = getattr(obj, p)
122 obj = getattr(obj, p)
123 except AttributeError, err:
123 except AttributeError, err:
124 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not defined)') %
125 '("%s" is not defined)') %
126 (hname, funcname))
126 (hname, funcname))
127 if not callable(obj):
127 if not callable(obj):
128 raise util.Abort(_('%s hook is invalid '
128 raise util.Abort(_('%s hook is invalid '
129 '("%s" is not callable)') %
129 '("%s" is not callable)') %
130 (hname, funcname))
130 (hname, funcname))
131 try:
131 try:
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 except (KeyboardInterrupt, util.SignalInterrupt):
133 except (KeyboardInterrupt, util.SignalInterrupt):
134 raise
134 raise
135 except Exception, exc:
135 except Exception, exc:
136 if isinstance(exc, util.Abort):
136 if isinstance(exc, util.Abort):
137 self.ui.warn(_('error: %s hook failed: %s\n') %
137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 (hname, exc.args[0]))
138 (hname, exc.args[0]))
139 else:
139 else:
140 self.ui.warn(_('error: %s hook raised an exception: '
140 self.ui.warn(_('error: %s hook raised an exception: '
141 '%s\n') % (hname, exc))
141 '%s\n') % (hname, exc))
142 if throw:
142 if throw:
143 raise
143 raise
144 self.ui.print_exc()
144 self.ui.print_exc()
145 return True
145 return True
146 if r:
146 if r:
147 if throw:
147 if throw:
148 raise util.Abort(_('%s hook failed') % hname)
148 raise util.Abort(_('%s hook failed') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 return r
150 return r
151
151
152 def runhook(name, cmd):
152 def runhook(name, cmd):
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 r = util.system(cmd, environ=env, cwd=self.root)
155 r = util.system(cmd, environ=env, cwd=self.root)
156 if r:
156 if r:
157 desc, r = util.explain_exit(r)
157 desc, r = util.explain_exit(r)
158 if throw:
158 if throw:
159 raise util.Abort(_('%s hook %s') % (name, desc))
159 raise util.Abort(_('%s hook %s') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 return r
161 return r
162
162
163 r = False
163 r = False
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 if hname.split(".", 1)[0] == name and cmd]
165 if hname.split(".", 1)[0] == name and cmd]
166 hooks.sort()
166 hooks.sort()
167 for hname, cmd in hooks:
167 for hname, cmd in hooks:
168 if cmd.startswith('python:'):
168 if cmd.startswith('python:'):
169 r = callhook(hname, cmd[7:].strip()) or r
169 r = callhook(hname, cmd[7:].strip()) or r
170 else:
170 else:
171 r = runhook(hname, cmd) or r
171 r = runhook(hname, cmd) or r
172 return r
172 return r
173
173
174 tag_disallowed = ':\r\n'
174 tag_disallowed = ':\r\n'
175
175
176 def tag(self, name, node, message, local, user, date):
176 def tag(self, name, node, message, local, user, date):
177 '''tag a revision with a symbolic name.
177 '''tag a revision with a symbolic name.
178
178
179 if local is True, the tag is stored in a per-repository file.
179 if local is True, the tag is stored in a per-repository file.
180 otherwise, it is stored in the .hgtags file, and a new
180 otherwise, it is stored in the .hgtags file, and a new
181 changeset is committed with the change.
181 changeset is committed with the change.
182
182
183 keyword arguments:
183 keyword arguments:
184
184
185 local: whether to store tag in non-version-controlled file
185 local: whether to store tag in non-version-controlled file
186 (default False)
186 (default False)
187
187
188 message: commit message to use if committing
188 message: commit message to use if committing
189
189
190 user: name of user to use if committing
190 user: name of user to use if committing
191
191
192 date: date tuple to use if committing'''
192 date: date tuple to use if committing'''
193
193
194 for c in self.tag_disallowed:
194 for c in self.tag_disallowed:
195 if c in name:
195 if c in name:
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197
197
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199
199
200 if local:
200 if local:
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.hook('tag', node=hex(node), tag=name, local=local)
202 self.hook('tag', node=hex(node), tag=name, local=local)
203 return
203 return
204
204
205 for x in self.status()[:5]:
205 for x in self.status()[:5]:
206 if '.hgtags' in x:
206 if '.hgtags' in x:
207 raise util.Abort(_('working copy of .hgtags is changed '
207 raise util.Abort(_('working copy of .hgtags is changed '
208 '(please commit .hgtags manually)'))
208 '(please commit .hgtags manually)'))
209
209
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 if self.dirstate.state('.hgtags') == '?':
211 if self.dirstate.state('.hgtags') == '?':
212 self.add(['.hgtags'])
212 self.add(['.hgtags'])
213
213
214 self.commit(['.hgtags'], message, user, date)
214 self.commit(['.hgtags'], message, user, date)
215 self.hook('tag', node=hex(node), tag=name, local=local)
215 self.hook('tag', node=hex(node), tag=name, local=local)
216
216
217 def tags(self):
217 def tags(self):
218 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
219 if not self.tagscache:
219 if not self.tagscache:
220 self.tagscache = {}
220 self.tagscache = {}
221
221
222 def parsetag(line, context):
222 def parsetag(line, context):
223 if not line:
223 if not line:
224 return
224 return
225 s = l.split(" ", 1)
225 s = l.split(" ", 1)
226 if len(s) != 2:
226 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
228 return
229 node, key = s
229 node, key = s
230 key = key.strip()
230 key = key.strip()
231 try:
231 try:
232 bin_n = bin(node)
232 bin_n = bin(node)
233 except TypeError:
233 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
235 (context, node))
236 return
236 return
237 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
239 (context, key))
240 return
240 return
241 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
242
242
243 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
245 # taking precedence
245 # taking precedence
246 heads = self.heads()
246 heads = self.heads()
247 heads.reverse()
247 heads.reverse()
248 fl = self.file(".hgtags")
248 fl = self.file(".hgtags")
249 for node in heads:
249 for node in heads:
250 change = self.changelog.read(node)
250 change = self.changelog.read(node)
251 rev = self.changelog.rev(node)
251 rev = self.changelog.rev(node)
252 fn, ff = self.manifest.find(change[0], '.hgtags')
252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 if fn is None: continue
253 if fn is None: continue
254 count = 0
254 count = 0
255 for l in fl.read(fn).splitlines():
255 for l in fl.read(fn).splitlines():
256 count += 1
256 count += 1
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 (rev, short(node), count))
258 (rev, short(node), count))
259 try:
259 try:
260 f = self.opener("localtags")
260 f = self.opener("localtags")
261 count = 0
261 count = 0
262 for l in f:
262 for l in f:
263 count += 1
263 count += 1
264 parsetag(l, _("localtags, line %d") % count)
264 parsetag(l, _("localtags, line %d") % count)
265 except IOError:
265 except IOError:
266 pass
266 pass
267
267
268 self.tagscache['tip'] = self.changelog.tip()
268 self.tagscache['tip'] = self.changelog.tip()
269
269
270 return self.tagscache
270 return self.tagscache
271
271
272 def tagslist(self):
272 def tagslist(self):
273 '''return a list of tags ordered by revision'''
273 '''return a list of tags ordered by revision'''
274 l = []
274 l = []
275 for t, n in self.tags().items():
275 for t, n in self.tags().items():
276 try:
276 try:
277 r = self.changelog.rev(n)
277 r = self.changelog.rev(n)
278 except:
278 except:
279 r = -2 # sort to the beginning of the list if unknown
279 r = -2 # sort to the beginning of the list if unknown
280 l.append((r, t, n))
280 l.append((r, t, n))
281 l.sort()
281 l.sort()
282 return [(t, n) for r, t, n in l]
282 return [(t, n) for r, t, n in l]
283
283
284 def nodetags(self, node):
284 def nodetags(self, node):
285 '''return the tags associated with a node'''
285 '''return the tags associated with a node'''
286 if not self.nodetagscache:
286 if not self.nodetagscache:
287 self.nodetagscache = {}
287 self.nodetagscache = {}
288 for t, n in self.tags().items():
288 for t, n in self.tags().items():
289 self.nodetagscache.setdefault(n, []).append(t)
289 self.nodetagscache.setdefault(n, []).append(t)
290 return self.nodetagscache.get(node, [])
290 return self.nodetagscache.get(node, [])
291
291
292 def branchtags(self):
292 def branchtags(self):
293 if self.branchcache != None:
293 if self.branchcache != None:
294 return self.branchcache
294 return self.branchcache
295
295
296 self.branchcache = {}
296 self.branchcache = {} # avoid recursion in changectx
297
297
298 try:
298 try:
299 f = self.opener("branches.cache")
299 f = self.opener("branches.cache")
300 last, lrev = f.readline().rstrip().split(" ", 1)
300 last, lrev = f.readline().rstrip().split(" ", 1)
301 last, lrev = bin(last), int(lrev)
301 last, lrev = bin(last), int(lrev)
302 if self.changelog.node(lrev) == last: # sanity check
302 if self.changelog.node(lrev) == last: # sanity check
303 for l in f:
303 for l in f:
304 node, label = l.rstrip().split(" ", 1)
304 node, label = l.rstrip().split(" ", 1)
305 self.branchcache[label] = bin(node)
305 self.branchcache[label] = bin(node)
306 f.close()
306 f.close()
307 except IOError:
307 except IOError:
308 last, lrev = nullid, -1
308 last, lrev = nullid, -1
309 lrev = self.changelog.rev(last)
309 lrev = self.changelog.rev(last)
310
310
311 tip = self.changelog.count() - 1
311 tip = self.changelog.count() - 1
312 if lrev != tip:
312 if lrev != tip:
313 for r in range(lrev + 1, tip + 1):
313 for r in xrange(lrev + 1, tip + 1):
314 n = self.changelog.node(r)
314 c = self.changectx(r)
315 c = self.changelog.read(n)
315 b = c.branch()
316 b = c[5].get("branch")
317 if b:
316 if b:
318 self.branchcache[b] = n
317 self.branchcache[b] = c.node()
319 self._writebranchcache()
318 self._writebranchcache()
320
319
321 return self.branchcache
320 return self.branchcache
322
321
323 def _writebranchcache(self):
322 def _writebranchcache(self):
324 f = self.opener("branches.cache", "w")
323 f = self.opener("branches.cache", "w")
325 t = self.changelog.tip()
324 t = self.changelog.tip()
326 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
325 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
327 for label, node in self.branchcache.iteritems():
326 for label, node in self.branchcache.iteritems():
328 f.write("%s %s\n" % (hex(node), label))
327 f.write("%s %s\n" % (hex(node), label))
329
328
330 def lookup(self, key):
329 def lookup(self, key):
331 if key == '.':
330 if key == '.':
332 key = self.dirstate.parents()[0]
331 key = self.dirstate.parents()[0]
333 if key == nullid:
332 if key == nullid:
334 raise repo.RepoError(_("no revision checked out"))
333 raise repo.RepoError(_("no revision checked out"))
335 if key in self.tags():
334 if key in self.tags():
336 return self.tags()[key]
335 return self.tags()[key]
337 if key in self.branchtags():
336 if key in self.branchtags():
338 return self.branchtags()[key]
337 return self.branchtags()[key]
339 try:
338 try:
340 return self.changelog.lookup(key)
339 return self.changelog.lookup(key)
341 except:
340 except:
342 raise repo.RepoError(_("unknown revision '%s'") % key)
341 raise repo.RepoError(_("unknown revision '%s'") % key)
343
342
344 def dev(self):
343 def dev(self):
345 return os.lstat(self.path).st_dev
344 return os.lstat(self.path).st_dev
346
345
347 def local(self):
346 def local(self):
348 return True
347 return True
349
348
350 def join(self, f):
349 def join(self, f):
351 return os.path.join(self.path, f)
350 return os.path.join(self.path, f)
352
351
353 def wjoin(self, f):
352 def wjoin(self, f):
354 return os.path.join(self.root, f)
353 return os.path.join(self.root, f)
355
354
356 def file(self, f):
355 def file(self, f):
357 if f[0] == '/':
356 if f[0] == '/':
358 f = f[1:]
357 f = f[1:]
359 return filelog.filelog(self.opener, f, self.revlogversion)
358 return filelog.filelog(self.opener, f, self.revlogversion)
360
359
361 def changectx(self, changeid=None):
360 def changectx(self, changeid=None):
362 return context.changectx(self, changeid)
361 return context.changectx(self, changeid)
363
362
364 def workingctx(self):
363 def workingctx(self):
365 return context.workingctx(self)
364 return context.workingctx(self)
366
365
367 def parents(self, changeid=None):
366 def parents(self, changeid=None):
368 '''
367 '''
369 get list of changectxs for parents of changeid or working directory
368 get list of changectxs for parents of changeid or working directory
370 '''
369 '''
371 if changeid is None:
370 if changeid is None:
372 pl = self.dirstate.parents()
371 pl = self.dirstate.parents()
373 else:
372 else:
374 n = self.changelog.lookup(changeid)
373 n = self.changelog.lookup(changeid)
375 pl = self.changelog.parents(n)
374 pl = self.changelog.parents(n)
376 if pl[1] == nullid:
375 if pl[1] == nullid:
377 return [self.changectx(pl[0])]
376 return [self.changectx(pl[0])]
378 return [self.changectx(pl[0]), self.changectx(pl[1])]
377 return [self.changectx(pl[0]), self.changectx(pl[1])]
379
378
380 def filectx(self, path, changeid=None, fileid=None):
379 def filectx(self, path, changeid=None, fileid=None):
381 """changeid can be a changeset revision, node, or tag.
380 """changeid can be a changeset revision, node, or tag.
382 fileid can be a file revision or node."""
381 fileid can be a file revision or node."""
383 return context.filectx(self, path, changeid, fileid)
382 return context.filectx(self, path, changeid, fileid)
384
383
385 def getcwd(self):
384 def getcwd(self):
386 return self.dirstate.getcwd()
385 return self.dirstate.getcwd()
387
386
388 def wfile(self, f, mode='r'):
387 def wfile(self, f, mode='r'):
389 return self.wopener(f, mode)
388 return self.wopener(f, mode)
390
389
391 def wread(self, filename):
390 def wread(self, filename):
392 if self.encodepats == None:
391 if self.encodepats == None:
393 l = []
392 l = []
394 for pat, cmd in self.ui.configitems("encode"):
393 for pat, cmd in self.ui.configitems("encode"):
395 mf = util.matcher(self.root, "", [pat], [], [])[1]
394 mf = util.matcher(self.root, "", [pat], [], [])[1]
396 l.append((mf, cmd))
395 l.append((mf, cmd))
397 self.encodepats = l
396 self.encodepats = l
398
397
399 data = self.wopener(filename, 'r').read()
398 data = self.wopener(filename, 'r').read()
400
399
401 for mf, cmd in self.encodepats:
400 for mf, cmd in self.encodepats:
402 if mf(filename):
401 if mf(filename):
403 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
402 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
404 data = util.filter(data, cmd)
403 data = util.filter(data, cmd)
405 break
404 break
406
405
407 return data
406 return data
408
407
409 def wwrite(self, filename, data, fd=None):
408 def wwrite(self, filename, data, fd=None):
410 if self.decodepats == None:
409 if self.decodepats == None:
411 l = []
410 l = []
412 for pat, cmd in self.ui.configitems("decode"):
411 for pat, cmd in self.ui.configitems("decode"):
413 mf = util.matcher(self.root, "", [pat], [], [])[1]
412 mf = util.matcher(self.root, "", [pat], [], [])[1]
414 l.append((mf, cmd))
413 l.append((mf, cmd))
415 self.decodepats = l
414 self.decodepats = l
416
415
417 for mf, cmd in self.decodepats:
416 for mf, cmd in self.decodepats:
418 if mf(filename):
417 if mf(filename):
419 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
418 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
420 data = util.filter(data, cmd)
419 data = util.filter(data, cmd)
421 break
420 break
422
421
423 if fd:
422 if fd:
424 return fd.write(data)
423 return fd.write(data)
425 return self.wopener(filename, 'w').write(data)
424 return self.wopener(filename, 'w').write(data)
426
425
427 def transaction(self):
426 def transaction(self):
428 tr = self.transhandle
427 tr = self.transhandle
429 if tr != None and tr.running():
428 if tr != None and tr.running():
430 return tr.nest()
429 return tr.nest()
431
430
432 # save dirstate for rollback
431 # save dirstate for rollback
433 try:
432 try:
434 ds = self.opener("dirstate").read()
433 ds = self.opener("dirstate").read()
435 except IOError:
434 except IOError:
436 ds = ""
435 ds = ""
437 self.opener("journal.dirstate", "w").write(ds)
436 self.opener("journal.dirstate", "w").write(ds)
438
437
439 tr = transaction.transaction(self.ui.warn, self.opener,
438 tr = transaction.transaction(self.ui.warn, self.opener,
440 self.join("journal"),
439 self.join("journal"),
441 aftertrans(self.path))
440 aftertrans(self.path))
442 self.transhandle = tr
441 self.transhandle = tr
443 return tr
442 return tr
444
443
445 def recover(self):
444 def recover(self):
446 l = self.lock()
445 l = self.lock()
447 if os.path.exists(self.join("journal")):
446 if os.path.exists(self.join("journal")):
448 self.ui.status(_("rolling back interrupted transaction\n"))
447 self.ui.status(_("rolling back interrupted transaction\n"))
449 transaction.rollback(self.opener, self.join("journal"))
448 transaction.rollback(self.opener, self.join("journal"))
450 self.reload()
449 self.reload()
451 return True
450 return True
452 else:
451 else:
453 self.ui.warn(_("no interrupted transaction available\n"))
452 self.ui.warn(_("no interrupted transaction available\n"))
454 return False
453 return False
455
454
456 def rollback(self, wlock=None):
455 def rollback(self, wlock=None):
457 if not wlock:
456 if not wlock:
458 wlock = self.wlock()
457 wlock = self.wlock()
459 l = self.lock()
458 l = self.lock()
460 if os.path.exists(self.join("undo")):
459 if os.path.exists(self.join("undo")):
461 self.ui.status(_("rolling back last transaction\n"))
460 self.ui.status(_("rolling back last transaction\n"))
462 transaction.rollback(self.opener, self.join("undo"))
461 transaction.rollback(self.opener, self.join("undo"))
463 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
462 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
464 self.reload()
463 self.reload()
465 self.wreload()
464 self.wreload()
466 else:
465 else:
467 self.ui.warn(_("no rollback information available\n"))
466 self.ui.warn(_("no rollback information available\n"))
468
467
469 def wreload(self):
468 def wreload(self):
470 self.dirstate.read()
469 self.dirstate.read()
471
470
472 def reload(self):
471 def reload(self):
473 self.changelog.load()
472 self.changelog.load()
474 self.manifest.load()
473 self.manifest.load()
475 self.tagscache = None
474 self.tagscache = None
476 self.nodetagscache = None
475 self.nodetagscache = None
477
476
478 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
477 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
479 desc=None):
478 desc=None):
480 try:
479 try:
481 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
480 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
482 except lock.LockHeld, inst:
481 except lock.LockHeld, inst:
483 if not wait:
482 if not wait:
484 raise
483 raise
485 self.ui.warn(_("waiting for lock on %s held by %s\n") %
484 self.ui.warn(_("waiting for lock on %s held by %s\n") %
486 (desc, inst.args[0]))
485 (desc, inst.args[0]))
487 # default to 600 seconds timeout
486 # default to 600 seconds timeout
488 l = lock.lock(self.join(lockname),
487 l = lock.lock(self.join(lockname),
489 int(self.ui.config("ui", "timeout") or 600),
488 int(self.ui.config("ui", "timeout") or 600),
490 releasefn, desc=desc)
489 releasefn, desc=desc)
491 if acquirefn:
490 if acquirefn:
492 acquirefn()
491 acquirefn()
493 return l
492 return l
494
493
495 def lock(self, wait=1):
494 def lock(self, wait=1):
496 return self.do_lock("lock", wait, acquirefn=self.reload,
495 return self.do_lock("lock", wait, acquirefn=self.reload,
497 desc=_('repository %s') % self.origroot)
496 desc=_('repository %s') % self.origroot)
498
497
499 def wlock(self, wait=1):
498 def wlock(self, wait=1):
500 return self.do_lock("wlock", wait, self.dirstate.write,
499 return self.do_lock("wlock", wait, self.dirstate.write,
501 self.wreload,
500 self.wreload,
502 desc=_('working directory of %s') % self.origroot)
501 desc=_('working directory of %s') % self.origroot)
503
502
504 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
503 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
505 """
504 """
506 commit an individual file as part of a larger transaction
505 commit an individual file as part of a larger transaction
507 """
506 """
508
507
509 t = self.wread(fn)
508 t = self.wread(fn)
510 fl = self.file(fn)
509 fl = self.file(fn)
511 fp1 = manifest1.get(fn, nullid)
510 fp1 = manifest1.get(fn, nullid)
512 fp2 = manifest2.get(fn, nullid)
511 fp2 = manifest2.get(fn, nullid)
513
512
514 meta = {}
513 meta = {}
515 cp = self.dirstate.copied(fn)
514 cp = self.dirstate.copied(fn)
516 if cp:
515 if cp:
517 meta["copy"] = cp
516 meta["copy"] = cp
518 if not manifest2: # not a branch merge
517 if not manifest2: # not a branch merge
519 meta["copyrev"] = hex(manifest1.get(cp, nullid))
518 meta["copyrev"] = hex(manifest1.get(cp, nullid))
520 fp2 = nullid
519 fp2 = nullid
521 elif fp2 != nullid: # copied on remote side
520 elif fp2 != nullid: # copied on remote side
522 meta["copyrev"] = hex(manifest1.get(cp, nullid))
521 meta["copyrev"] = hex(manifest1.get(cp, nullid))
523 else: # copied on local side, reversed
522 else: # copied on local side, reversed
524 meta["copyrev"] = hex(manifest2.get(cp))
523 meta["copyrev"] = hex(manifest2.get(cp))
525 fp2 = nullid
524 fp2 = nullid
526 self.ui.debug(_(" %s: copy %s:%s\n") %
525 self.ui.debug(_(" %s: copy %s:%s\n") %
527 (fn, cp, meta["copyrev"]))
526 (fn, cp, meta["copyrev"]))
528 fp1 = nullid
527 fp1 = nullid
529 elif fp2 != nullid:
528 elif fp2 != nullid:
530 # is one parent an ancestor of the other?
529 # is one parent an ancestor of the other?
531 fpa = fl.ancestor(fp1, fp2)
530 fpa = fl.ancestor(fp1, fp2)
532 if fpa == fp1:
531 if fpa == fp1:
533 fp1, fp2 = fp2, nullid
532 fp1, fp2 = fp2, nullid
534 elif fpa == fp2:
533 elif fpa == fp2:
535 fp2 = nullid
534 fp2 = nullid
536
535
537 # is the file unmodified from the parent? report existing entry
536 # is the file unmodified from the parent? report existing entry
538 if fp2 == nullid and not fl.cmp(fp1, t):
537 if fp2 == nullid and not fl.cmp(fp1, t):
539 return fp1
538 return fp1
540
539
541 changelist.append(fn)
540 changelist.append(fn)
542 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
541 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
543
542
544 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
543 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
545 orig_parent = self.dirstate.parents()[0] or nullid
544 orig_parent = self.dirstate.parents()[0] or nullid
546 p1 = p1 or self.dirstate.parents()[0] or nullid
545 p1 = p1 or self.dirstate.parents()[0] or nullid
547 p2 = p2 or self.dirstate.parents()[1] or nullid
546 p2 = p2 or self.dirstate.parents()[1] or nullid
548 c1 = self.changelog.read(p1)
547 c1 = self.changelog.read(p1)
549 c2 = self.changelog.read(p2)
548 c2 = self.changelog.read(p2)
550 m1 = self.manifest.read(c1[0]).copy()
549 m1 = self.manifest.read(c1[0]).copy()
551 m2 = self.manifest.read(c2[0])
550 m2 = self.manifest.read(c2[0])
552 changed = []
551 changed = []
553 removed = []
552 removed = []
554
553
555 if orig_parent == p1:
554 if orig_parent == p1:
556 update_dirstate = 1
555 update_dirstate = 1
557 else:
556 else:
558 update_dirstate = 0
557 update_dirstate = 0
559
558
560 if not wlock:
559 if not wlock:
561 wlock = self.wlock()
560 wlock = self.wlock()
562 l = self.lock()
561 l = self.lock()
563 tr = self.transaction()
562 tr = self.transaction()
564 linkrev = self.changelog.count()
563 linkrev = self.changelog.count()
565 for f in files:
564 for f in files:
566 try:
565 try:
567 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
566 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
567 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
569 except IOError:
568 except IOError:
570 try:
569 try:
571 del m1[f]
570 del m1[f]
572 if update_dirstate:
571 if update_dirstate:
573 self.dirstate.forget([f])
572 self.dirstate.forget([f])
574 removed.append(f)
573 removed.append(f)
575 except:
574 except:
576 # deleted from p2?
575 # deleted from p2?
577 pass
576 pass
578
577
579 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
578 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
580 user = user or self.ui.username()
579 user = user or self.ui.username()
581 n = self.changelog.add(mnode, changed + removed, text,
580 n = self.changelog.add(mnode, changed + removed, text,
582 tr, p1, p2, user, date)
581 tr, p1, p2, user, date)
583 tr.close()
582 tr.close()
584 if update_dirstate:
583 if update_dirstate:
585 self.dirstate.setparents(n, nullid)
584 self.dirstate.setparents(n, nullid)
586
585
587 def commit(self, files=None, text="", user=None, date=None,
586 def commit(self, files=None, text="", user=None, date=None,
588 match=util.always, force=False, lock=None, wlock=None,
587 match=util.always, force=False, lock=None, wlock=None,
589 force_editor=False):
588 force_editor=False):
590 commit = []
589 commit = []
591 remove = []
590 remove = []
592 changed = []
591 changed = []
593
592
594 if files:
593 if files:
595 for f in files:
594 for f in files:
596 s = self.dirstate.state(f)
595 s = self.dirstate.state(f)
597 if s in 'nmai':
596 if s in 'nmai':
598 commit.append(f)
597 commit.append(f)
599 elif s == 'r':
598 elif s == 'r':
600 remove.append(f)
599 remove.append(f)
601 else:
600 else:
602 self.ui.warn(_("%s not tracked!\n") % f)
601 self.ui.warn(_("%s not tracked!\n") % f)
603 else:
602 else:
604 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
603 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
605 commit = modified + added
604 commit = modified + added
606 remove = removed
605 remove = removed
607
606
608 p1, p2 = self.dirstate.parents()
607 p1, p2 = self.dirstate.parents()
609 c1 = self.changelog.read(p1)
608 c1 = self.changelog.read(p1)
610 c2 = self.changelog.read(p2)
609 c2 = self.changelog.read(p2)
611 m1 = self.manifest.read(c1[0]).copy()
610 m1 = self.manifest.read(c1[0]).copy()
612 m2 = self.manifest.read(c2[0])
611 m2 = self.manifest.read(c2[0])
613
612
614 try:
613 try:
615 branchname = self.opener("branch").read().rstrip()
614 branchname = self.opener("branch").read().rstrip()
616 except IOError:
615 except IOError:
617 branchname = ""
616 branchname = ""
618 oldname = c1[5].get("branch", "")
617 oldname = c1[5].get("branch", "")
619
618
620 if not commit and not remove and not force and p2 == nullid and \
619 if not commit and not remove and not force and p2 == nullid and \
621 branchname == oldname:
620 branchname == oldname:
622 self.ui.status(_("nothing changed\n"))
621 self.ui.status(_("nothing changed\n"))
623 return None
622 return None
624
623
625 xp1 = hex(p1)
624 xp1 = hex(p1)
626 if p2 == nullid: xp2 = ''
625 if p2 == nullid: xp2 = ''
627 else: xp2 = hex(p2)
626 else: xp2 = hex(p2)
628
627
629 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
628 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
630
629
631 if not wlock:
630 if not wlock:
632 wlock = self.wlock()
631 wlock = self.wlock()
633 if not lock:
632 if not lock:
634 lock = self.lock()
633 lock = self.lock()
635 tr = self.transaction()
634 tr = self.transaction()
636
635
637 # check in files
636 # check in files
638 new = {}
637 new = {}
639 linkrev = self.changelog.count()
638 linkrev = self.changelog.count()
640 commit.sort()
639 commit.sort()
641 for f in commit:
640 for f in commit:
642 self.ui.note(f + "\n")
641 self.ui.note(f + "\n")
643 try:
642 try:
644 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
643 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
645 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
644 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
646 except IOError:
645 except IOError:
647 self.ui.warn(_("trouble committing %s!\n") % f)
646 self.ui.warn(_("trouble committing %s!\n") % f)
648 raise
647 raise
649
648
650 # update manifest
649 # update manifest
651 m1.update(new)
650 m1.update(new)
652 for f in remove:
651 for f in remove:
653 if f in m1:
652 if f in m1:
654 del m1[f]
653 del m1[f]
655 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
654 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
656
655
657 # add changeset
656 # add changeset
658 new = new.keys()
657 new = new.keys()
659 new.sort()
658 new.sort()
660
659
661 user = user or self.ui.username()
660 user = user or self.ui.username()
662 if not text or force_editor:
661 if not text or force_editor:
663 edittext = []
662 edittext = []
664 if text:
663 if text:
665 edittext.append(text)
664 edittext.append(text)
666 edittext.append("")
665 edittext.append("")
667 if p2 != nullid:
666 if p2 != nullid:
668 edittext.append("HG: branch merge")
667 edittext.append("HG: branch merge")
669 edittext.extend(["HG: changed %s" % f for f in changed])
668 edittext.extend(["HG: changed %s" % f for f in changed])
670 edittext.extend(["HG: removed %s" % f for f in remove])
669 edittext.extend(["HG: removed %s" % f for f in remove])
671 if not changed and not remove:
670 if not changed and not remove:
672 edittext.append("HG: no files changed")
671 edittext.append("HG: no files changed")
673 edittext.append("")
672 edittext.append("")
674 # run editor in the repository root
673 # run editor in the repository root
675 olddir = os.getcwd()
674 olddir = os.getcwd()
676 os.chdir(self.root)
675 os.chdir(self.root)
677 text = self.ui.edit("\n".join(edittext), user)
676 text = self.ui.edit("\n".join(edittext), user)
678 os.chdir(olddir)
677 os.chdir(olddir)
679
678
680 lines = [line.rstrip() for line in text.rstrip().splitlines()]
679 lines = [line.rstrip() for line in text.rstrip().splitlines()]
681 while lines and not lines[0]:
680 while lines and not lines[0]:
682 del lines[0]
681 del lines[0]
683 if not lines:
682 if not lines:
684 return None
683 return None
685 text = '\n'.join(lines)
684 text = '\n'.join(lines)
686 extra = {}
685 extra = {}
687 if branchname:
686 if branchname:
688 extra["branch"] = branchname
687 extra["branch"] = branchname
689 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
688 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
690 user, date, extra)
689 user, date, extra)
691 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
690 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
692 parent2=xp2)
691 parent2=xp2)
693 tr.close()
692 tr.close()
694
693
695 self.dirstate.setparents(n)
694 self.dirstate.setparents(n)
696 self.dirstate.update(new, "n")
695 self.dirstate.update(new, "n")
697 self.dirstate.forget(remove)
696 self.dirstate.forget(remove)
698
697
699 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
698 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
700 return n
699 return n
701
700
702 def walk(self, node=None, files=[], match=util.always, badmatch=None):
701 def walk(self, node=None, files=[], match=util.always, badmatch=None):
703 if node:
702 if node:
704 fdict = dict.fromkeys(files)
703 fdict = dict.fromkeys(files)
705 for fn in self.manifest.read(self.changelog.read(node)[0]):
704 for fn in self.manifest.read(self.changelog.read(node)[0]):
706 for ffn in fdict:
705 for ffn in fdict:
707 # match if the file is the exact name or a directory
706 # match if the file is the exact name or a directory
708 if ffn == fn or fn.startswith("%s/" % ffn):
707 if ffn == fn or fn.startswith("%s/" % ffn):
709 del fdict[ffn]
708 del fdict[ffn]
710 break
709 break
711 if match(fn):
710 if match(fn):
712 yield 'm', fn
711 yield 'm', fn
713 for fn in fdict:
712 for fn in fdict:
714 if badmatch and badmatch(fn):
713 if badmatch and badmatch(fn):
715 if match(fn):
714 if match(fn):
716 yield 'b', fn
715 yield 'b', fn
717 else:
716 else:
718 self.ui.warn(_('%s: No such file in rev %s\n') % (
717 self.ui.warn(_('%s: No such file in rev %s\n') % (
719 util.pathto(self.getcwd(), fn), short(node)))
718 util.pathto(self.getcwd(), fn), short(node)))
720 else:
719 else:
721 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
720 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
722 yield src, fn
721 yield src, fn
723
722
724 def status(self, node1=None, node2=None, files=[], match=util.always,
723 def status(self, node1=None, node2=None, files=[], match=util.always,
725 wlock=None, list_ignored=False, list_clean=False):
724 wlock=None, list_ignored=False, list_clean=False):
726 """return status of files between two nodes or node and working directory
725 """return status of files between two nodes or node and working directory
727
726
728 If node1 is None, use the first dirstate parent instead.
727 If node1 is None, use the first dirstate parent instead.
729 If node2 is None, compare node1 with working directory.
728 If node2 is None, compare node1 with working directory.
730 """
729 """
731
730
732 def fcmp(fn, mf):
731 def fcmp(fn, mf):
733 t1 = self.wread(fn)
732 t1 = self.wread(fn)
734 return self.file(fn).cmp(mf.get(fn, nullid), t1)
733 return self.file(fn).cmp(mf.get(fn, nullid), t1)
735
734
736 def mfmatches(node):
735 def mfmatches(node):
737 change = self.changelog.read(node)
736 change = self.changelog.read(node)
738 mf = self.manifest.read(change[0]).copy()
737 mf = self.manifest.read(change[0]).copy()
739 for fn in mf.keys():
738 for fn in mf.keys():
740 if not match(fn):
739 if not match(fn):
741 del mf[fn]
740 del mf[fn]
742 return mf
741 return mf
743
742
744 modified, added, removed, deleted, unknown = [], [], [], [], []
743 modified, added, removed, deleted, unknown = [], [], [], [], []
745 ignored, clean = [], []
744 ignored, clean = [], []
746
745
747 compareworking = False
746 compareworking = False
748 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
747 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
749 compareworking = True
748 compareworking = True
750
749
751 if not compareworking:
750 if not compareworking:
752 # read the manifest from node1 before the manifest from node2,
751 # read the manifest from node1 before the manifest from node2,
753 # so that we'll hit the manifest cache if we're going through
752 # so that we'll hit the manifest cache if we're going through
754 # all the revisions in parent->child order.
753 # all the revisions in parent->child order.
755 mf1 = mfmatches(node1)
754 mf1 = mfmatches(node1)
756
755
757 # are we comparing the working directory?
756 # are we comparing the working directory?
758 if not node2:
757 if not node2:
759 if not wlock:
758 if not wlock:
760 try:
759 try:
761 wlock = self.wlock(wait=0)
760 wlock = self.wlock(wait=0)
762 except lock.LockException:
761 except lock.LockException:
763 wlock = None
762 wlock = None
764 (lookup, modified, added, removed, deleted, unknown,
763 (lookup, modified, added, removed, deleted, unknown,
765 ignored, clean) = self.dirstate.status(files, match,
764 ignored, clean) = self.dirstate.status(files, match,
766 list_ignored, list_clean)
765 list_ignored, list_clean)
767
766
768 # are we comparing working dir against its parent?
767 # are we comparing working dir against its parent?
769 if compareworking:
768 if compareworking:
770 if lookup:
769 if lookup:
771 # do a full compare of any files that might have changed
770 # do a full compare of any files that might have changed
772 mf2 = mfmatches(self.dirstate.parents()[0])
771 mf2 = mfmatches(self.dirstate.parents()[0])
773 for f in lookup:
772 for f in lookup:
774 if fcmp(f, mf2):
773 if fcmp(f, mf2):
775 modified.append(f)
774 modified.append(f)
776 else:
775 else:
777 clean.append(f)
776 clean.append(f)
778 if wlock is not None:
777 if wlock is not None:
779 self.dirstate.update([f], "n")
778 self.dirstate.update([f], "n")
780 else:
779 else:
781 # we are comparing working dir against non-parent
780 # we are comparing working dir against non-parent
782 # generate a pseudo-manifest for the working dir
781 # generate a pseudo-manifest for the working dir
783 # XXX: create it in dirstate.py ?
782 # XXX: create it in dirstate.py ?
784 mf2 = mfmatches(self.dirstate.parents()[0])
783 mf2 = mfmatches(self.dirstate.parents()[0])
785 for f in lookup + modified + added:
784 for f in lookup + modified + added:
786 mf2[f] = ""
785 mf2[f] = ""
787 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
786 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
788 for f in removed:
787 for f in removed:
789 if f in mf2:
788 if f in mf2:
790 del mf2[f]
789 del mf2[f]
791 else:
790 else:
792 # we are comparing two revisions
791 # we are comparing two revisions
793 mf2 = mfmatches(node2)
792 mf2 = mfmatches(node2)
794
793
795 if not compareworking:
794 if not compareworking:
796 # flush lists from dirstate before comparing manifests
795 # flush lists from dirstate before comparing manifests
797 modified, added, clean = [], [], []
796 modified, added, clean = [], [], []
798
797
799 # make sure to sort the files so we talk to the disk in a
798 # make sure to sort the files so we talk to the disk in a
800 # reasonable order
799 # reasonable order
801 mf2keys = mf2.keys()
800 mf2keys = mf2.keys()
802 mf2keys.sort()
801 mf2keys.sort()
803 for fn in mf2keys:
802 for fn in mf2keys:
804 if mf1.has_key(fn):
803 if mf1.has_key(fn):
805 if mf1.flags(fn) != mf2.flags(fn) or \
804 if mf1.flags(fn) != mf2.flags(fn) or \
806 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
805 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
807 modified.append(fn)
806 modified.append(fn)
808 elif list_clean:
807 elif list_clean:
809 clean.append(fn)
808 clean.append(fn)
810 del mf1[fn]
809 del mf1[fn]
811 else:
810 else:
812 added.append(fn)
811 added.append(fn)
813
812
814 removed = mf1.keys()
813 removed = mf1.keys()
815
814
816 # sort and return results:
815 # sort and return results:
817 for l in modified, added, removed, deleted, unknown, ignored, clean:
816 for l in modified, added, removed, deleted, unknown, ignored, clean:
818 l.sort()
817 l.sort()
819 return (modified, added, removed, deleted, unknown, ignored, clean)
818 return (modified, added, removed, deleted, unknown, ignored, clean)
820
819
821 def add(self, list, wlock=None):
820 def add(self, list, wlock=None):
822 if not wlock:
821 if not wlock:
823 wlock = self.wlock()
822 wlock = self.wlock()
824 for f in list:
823 for f in list:
825 p = self.wjoin(f)
824 p = self.wjoin(f)
826 if not os.path.exists(p):
825 if not os.path.exists(p):
827 self.ui.warn(_("%s does not exist!\n") % f)
826 self.ui.warn(_("%s does not exist!\n") % f)
828 elif not os.path.isfile(p):
827 elif not os.path.isfile(p):
829 self.ui.warn(_("%s not added: only files supported currently\n")
828 self.ui.warn(_("%s not added: only files supported currently\n")
830 % f)
829 % f)
831 elif self.dirstate.state(f) in 'an':
830 elif self.dirstate.state(f) in 'an':
832 self.ui.warn(_("%s already tracked!\n") % f)
831 self.ui.warn(_("%s already tracked!\n") % f)
833 else:
832 else:
834 self.dirstate.update([f], "a")
833 self.dirstate.update([f], "a")
835
834
836 def forget(self, list, wlock=None):
835 def forget(self, list, wlock=None):
837 if not wlock:
836 if not wlock:
838 wlock = self.wlock()
837 wlock = self.wlock()
839 for f in list:
838 for f in list:
840 if self.dirstate.state(f) not in 'ai':
839 if self.dirstate.state(f) not in 'ai':
841 self.ui.warn(_("%s not added!\n") % f)
840 self.ui.warn(_("%s not added!\n") % f)
842 else:
841 else:
843 self.dirstate.forget([f])
842 self.dirstate.forget([f])
844
843
845 def remove(self, list, unlink=False, wlock=None):
844 def remove(self, list, unlink=False, wlock=None):
846 if unlink:
845 if unlink:
847 for f in list:
846 for f in list:
848 try:
847 try:
849 util.unlink(self.wjoin(f))
848 util.unlink(self.wjoin(f))
850 except OSError, inst:
849 except OSError, inst:
851 if inst.errno != errno.ENOENT:
850 if inst.errno != errno.ENOENT:
852 raise
851 raise
853 if not wlock:
852 if not wlock:
854 wlock = self.wlock()
853 wlock = self.wlock()
855 for f in list:
854 for f in list:
856 p = self.wjoin(f)
855 p = self.wjoin(f)
857 if os.path.exists(p):
856 if os.path.exists(p):
858 self.ui.warn(_("%s still exists!\n") % f)
857 self.ui.warn(_("%s still exists!\n") % f)
859 elif self.dirstate.state(f) == 'a':
858 elif self.dirstate.state(f) == 'a':
860 self.dirstate.forget([f])
859 self.dirstate.forget([f])
861 elif f not in self.dirstate:
860 elif f not in self.dirstate:
862 self.ui.warn(_("%s not tracked!\n") % f)
861 self.ui.warn(_("%s not tracked!\n") % f)
863 else:
862 else:
864 self.dirstate.update([f], "r")
863 self.dirstate.update([f], "r")
865
864
866 def undelete(self, list, wlock=None):
865 def undelete(self, list, wlock=None):
867 p = self.dirstate.parents()[0]
866 p = self.dirstate.parents()[0]
868 mn = self.changelog.read(p)[0]
867 mn = self.changelog.read(p)[0]
869 m = self.manifest.read(mn)
868 m = self.manifest.read(mn)
870 if not wlock:
869 if not wlock:
871 wlock = self.wlock()
870 wlock = self.wlock()
872 for f in list:
871 for f in list:
873 if self.dirstate.state(f) not in "r":
872 if self.dirstate.state(f) not in "r":
874 self.ui.warn("%s not removed!\n" % f)
873 self.ui.warn("%s not removed!\n" % f)
875 else:
874 else:
876 t = self.file(f).read(m[f])
875 t = self.file(f).read(m[f])
877 self.wwrite(f, t)
876 self.wwrite(f, t)
878 util.set_exec(self.wjoin(f), m.execf(f))
877 util.set_exec(self.wjoin(f), m.execf(f))
879 self.dirstate.update([f], "n")
878 self.dirstate.update([f], "n")
880
879
881 def copy(self, source, dest, wlock=None):
880 def copy(self, source, dest, wlock=None):
882 p = self.wjoin(dest)
881 p = self.wjoin(dest)
883 if not os.path.exists(p):
882 if not os.path.exists(p):
884 self.ui.warn(_("%s does not exist!\n") % dest)
883 self.ui.warn(_("%s does not exist!\n") % dest)
885 elif not os.path.isfile(p):
884 elif not os.path.isfile(p):
886 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
885 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
887 else:
886 else:
888 if not wlock:
887 if not wlock:
889 wlock = self.wlock()
888 wlock = self.wlock()
890 if self.dirstate.state(dest) == '?':
889 if self.dirstate.state(dest) == '?':
891 self.dirstate.update([dest], "a")
890 self.dirstate.update([dest], "a")
892 self.dirstate.copy(source, dest)
891 self.dirstate.copy(source, dest)
893
892
894 def heads(self, start=None):
893 def heads(self, start=None):
895 heads = self.changelog.heads(start)
894 heads = self.changelog.heads(start)
896 # sort the output in rev descending order
895 # sort the output in rev descending order
897 heads = [(-self.changelog.rev(h), h) for h in heads]
896 heads = [(-self.changelog.rev(h), h) for h in heads]
898 heads.sort()
897 heads.sort()
899 return [n for (r, n) in heads]
898 return [n for (r, n) in heads]
900
899
901 # branchlookup returns a dict giving a list of branches for
900 # branchlookup returns a dict giving a list of branches for
902 # each head. A branch is defined as the tag of a node or
901 # each head. A branch is defined as the tag of a node or
903 # the branch of the node's parents. If a node has multiple
902 # the branch of the node's parents. If a node has multiple
904 # branch tags, tags are eliminated if they are visible from other
903 # branch tags, tags are eliminated if they are visible from other
905 # branch tags.
904 # branch tags.
906 #
905 #
907 # So, for this graph: a->b->c->d->e
906 # So, for this graph: a->b->c->d->e
908 # \ /
907 # \ /
909 # aa -----/
908 # aa -----/
910 # a has tag 2.6.12
909 # a has tag 2.6.12
911 # d has tag 2.6.13
910 # d has tag 2.6.13
912 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
911 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
913 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
912 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
914 # from the list.
913 # from the list.
915 #
914 #
916 # It is possible that more than one head will have the same branch tag.
915 # It is possible that more than one head will have the same branch tag.
917 # callers need to check the result for multiple heads under the same
916 # callers need to check the result for multiple heads under the same
918 # branch tag if that is a problem for them (ie checkout of a specific
917 # branch tag if that is a problem for them (ie checkout of a specific
919 # branch).
918 # branch).
920 #
919 #
921 # passing in a specific branch will limit the depth of the search
920 # passing in a specific branch will limit the depth of the search
922 # through the parents. It won't limit the branches returned in the
921 # through the parents. It won't limit the branches returned in the
923 # result though.
922 # result though.
924 def branchlookup(self, heads=None, branch=None):
923 def branchlookup(self, heads=None, branch=None):
925 if not heads:
924 if not heads:
926 heads = self.heads()
925 heads = self.heads()
927 headt = [ h for h in heads ]
926 headt = [ h for h in heads ]
928 chlog = self.changelog
927 chlog = self.changelog
929 branches = {}
928 branches = {}
930 merges = []
929 merges = []
931 seenmerge = {}
930 seenmerge = {}
932
931
933 # traverse the tree once for each head, recording in the branches
932 # traverse the tree once for each head, recording in the branches
934 # dict which tags are visible from this head. The branches
933 # dict which tags are visible from this head. The branches
935 # dict also records which tags are visible from each tag
934 # dict also records which tags are visible from each tag
936 # while we traverse.
935 # while we traverse.
937 while headt or merges:
936 while headt or merges:
938 if merges:
937 if merges:
939 n, found = merges.pop()
938 n, found = merges.pop()
940 visit = [n]
939 visit = [n]
941 else:
940 else:
942 h = headt.pop()
941 h = headt.pop()
943 visit = [h]
942 visit = [h]
944 found = [h]
943 found = [h]
945 seen = {}
944 seen = {}
946 while visit:
945 while visit:
947 n = visit.pop()
946 n = visit.pop()
948 if n in seen:
947 if n in seen:
949 continue
948 continue
950 pp = chlog.parents(n)
949 pp = chlog.parents(n)
951 tags = self.nodetags(n)
950 tags = self.nodetags(n)
952 if tags:
951 if tags:
953 for x in tags:
952 for x in tags:
954 if x == 'tip':
953 if x == 'tip':
955 continue
954 continue
956 for f in found:
955 for f in found:
957 branches.setdefault(f, {})[n] = 1
956 branches.setdefault(f, {})[n] = 1
958 branches.setdefault(n, {})[n] = 1
957 branches.setdefault(n, {})[n] = 1
959 break
958 break
960 if n not in found:
959 if n not in found:
961 found.append(n)
960 found.append(n)
962 if branch in tags:
961 if branch in tags:
963 continue
962 continue
964 seen[n] = 1
963 seen[n] = 1
965 if pp[1] != nullid and n not in seenmerge:
964 if pp[1] != nullid and n not in seenmerge:
966 merges.append((pp[1], [x for x in found]))
965 merges.append((pp[1], [x for x in found]))
967 seenmerge[n] = 1
966 seenmerge[n] = 1
968 if pp[0] != nullid:
967 if pp[0] != nullid:
969 visit.append(pp[0])
968 visit.append(pp[0])
970 # traverse the branches dict, eliminating branch tags from each
969 # traverse the branches dict, eliminating branch tags from each
971 # head that are visible from another branch tag for that head.
970 # head that are visible from another branch tag for that head.
972 out = {}
971 out = {}
973 viscache = {}
972 viscache = {}
974 for h in heads:
973 for h in heads:
975 def visible(node):
974 def visible(node):
976 if node in viscache:
975 if node in viscache:
977 return viscache[node]
976 return viscache[node]
978 ret = {}
977 ret = {}
979 visit = [node]
978 visit = [node]
980 while visit:
979 while visit:
981 x = visit.pop()
980 x = visit.pop()
982 if x in viscache:
981 if x in viscache:
983 ret.update(viscache[x])
982 ret.update(viscache[x])
984 elif x not in ret:
983 elif x not in ret:
985 ret[x] = 1
984 ret[x] = 1
986 if x in branches:
985 if x in branches:
987 visit[len(visit):] = branches[x].keys()
986 visit[len(visit):] = branches[x].keys()
988 viscache[node] = ret
987 viscache[node] = ret
989 return ret
988 return ret
990 if h not in branches:
989 if h not in branches:
991 continue
990 continue
992 # O(n^2), but somewhat limited. This only searches the
991 # O(n^2), but somewhat limited. This only searches the
993 # tags visible from a specific head, not all the tags in the
992 # tags visible from a specific head, not all the tags in the
994 # whole repo.
993 # whole repo.
995 for b in branches[h]:
994 for b in branches[h]:
996 vis = False
995 vis = False
997 for bb in branches[h].keys():
996 for bb in branches[h].keys():
998 if b != bb:
997 if b != bb:
999 if b in visible(bb):
998 if b in visible(bb):
1000 vis = True
999 vis = True
1001 break
1000 break
1002 if not vis:
1001 if not vis:
1003 l = out.setdefault(h, [])
1002 l = out.setdefault(h, [])
1004 l[len(l):] = self.nodetags(b)
1003 l[len(l):] = self.nodetags(b)
1005 return out
1004 return out
1006
1005
1007 def branches(self, nodes):
1006 def branches(self, nodes):
1008 if not nodes:
1007 if not nodes:
1009 nodes = [self.changelog.tip()]
1008 nodes = [self.changelog.tip()]
1010 b = []
1009 b = []
1011 for n in nodes:
1010 for n in nodes:
1012 t = n
1011 t = n
1013 while 1:
1012 while 1:
1014 p = self.changelog.parents(n)
1013 p = self.changelog.parents(n)
1015 if p[1] != nullid or p[0] == nullid:
1014 if p[1] != nullid or p[0] == nullid:
1016 b.append((t, n, p[0], p[1]))
1015 b.append((t, n, p[0], p[1]))
1017 break
1016 break
1018 n = p[0]
1017 n = p[0]
1019 return b
1018 return b
1020
1019
1021 def between(self, pairs):
1020 def between(self, pairs):
1022 r = []
1021 r = []
1023
1022
1024 for top, bottom in pairs:
1023 for top, bottom in pairs:
1025 n, l, i = top, [], 0
1024 n, l, i = top, [], 0
1026 f = 1
1025 f = 1
1027
1026
1028 while n != bottom:
1027 while n != bottom:
1029 p = self.changelog.parents(n)[0]
1028 p = self.changelog.parents(n)[0]
1030 if i == f:
1029 if i == f:
1031 l.append(n)
1030 l.append(n)
1032 f = f * 2
1031 f = f * 2
1033 n = p
1032 n = p
1034 i += 1
1033 i += 1
1035
1034
1036 r.append(l)
1035 r.append(l)
1037
1036
1038 return r
1037 return r
1039
1038
1040 def findincoming(self, remote, base=None, heads=None, force=False):
1039 def findincoming(self, remote, base=None, heads=None, force=False):
1041 """Return list of roots of the subsets of missing nodes from remote
1040 """Return list of roots of the subsets of missing nodes from remote
1042
1041
1043 If base dict is specified, assume that these nodes and their parents
1042 If base dict is specified, assume that these nodes and their parents
1044 exist on the remote side and that no child of a node of base exists
1043 exist on the remote side and that no child of a node of base exists
1045 in both remote and self.
1044 in both remote and self.
1046 Furthermore base will be updated to include the nodes that exists
1045 Furthermore base will be updated to include the nodes that exists
1047 in self and remote but no children exists in self and remote.
1046 in self and remote but no children exists in self and remote.
1048 If a list of heads is specified, return only nodes which are heads
1047 If a list of heads is specified, return only nodes which are heads
1049 or ancestors of these heads.
1048 or ancestors of these heads.
1050
1049
1051 All the ancestors of base are in self and in remote.
1050 All the ancestors of base are in self and in remote.
1052 All the descendants of the list returned are missing in self.
1051 All the descendants of the list returned are missing in self.
1053 (and so we know that the rest of the nodes are missing in remote, see
1052 (and so we know that the rest of the nodes are missing in remote, see
1054 outgoing)
1053 outgoing)
1055 """
1054 """
1056 m = self.changelog.nodemap
1055 m = self.changelog.nodemap
1057 search = []
1056 search = []
1058 fetch = {}
1057 fetch = {}
1059 seen = {}
1058 seen = {}
1060 seenbranch = {}
1059 seenbranch = {}
1061 if base == None:
1060 if base == None:
1062 base = {}
1061 base = {}
1063
1062
1064 if not heads:
1063 if not heads:
1065 heads = remote.heads()
1064 heads = remote.heads()
1066
1065
1067 if self.changelog.tip() == nullid:
1066 if self.changelog.tip() == nullid:
1068 base[nullid] = 1
1067 base[nullid] = 1
1069 if heads != [nullid]:
1068 if heads != [nullid]:
1070 return [nullid]
1069 return [nullid]
1071 return []
1070 return []
1072
1071
1073 # assume we're closer to the tip than the root
1072 # assume we're closer to the tip than the root
1074 # and start by examining the heads
1073 # and start by examining the heads
1075 self.ui.status(_("searching for changes\n"))
1074 self.ui.status(_("searching for changes\n"))
1076
1075
1077 unknown = []
1076 unknown = []
1078 for h in heads:
1077 for h in heads:
1079 if h not in m:
1078 if h not in m:
1080 unknown.append(h)
1079 unknown.append(h)
1081 else:
1080 else:
1082 base[h] = 1
1081 base[h] = 1
1083
1082
1084 if not unknown:
1083 if not unknown:
1085 return []
1084 return []
1086
1085
1087 req = dict.fromkeys(unknown)
1086 req = dict.fromkeys(unknown)
1088 reqcnt = 0
1087 reqcnt = 0
1089
1088
1090 # search through remote branches
1089 # search through remote branches
1091 # a 'branch' here is a linear segment of history, with four parts:
1090 # a 'branch' here is a linear segment of history, with four parts:
1092 # head, root, first parent, second parent
1091 # head, root, first parent, second parent
1093 # (a branch always has two parents (or none) by definition)
1092 # (a branch always has two parents (or none) by definition)
1094 unknown = remote.branches(unknown)
1093 unknown = remote.branches(unknown)
1095 while unknown:
1094 while unknown:
1096 r = []
1095 r = []
1097 while unknown:
1096 while unknown:
1098 n = unknown.pop(0)
1097 n = unknown.pop(0)
1099 if n[0] in seen:
1098 if n[0] in seen:
1100 continue
1099 continue
1101
1100
1102 self.ui.debug(_("examining %s:%s\n")
1101 self.ui.debug(_("examining %s:%s\n")
1103 % (short(n[0]), short(n[1])))
1102 % (short(n[0]), short(n[1])))
1104 if n[0] == nullid: # found the end of the branch
1103 if n[0] == nullid: # found the end of the branch
1105 pass
1104 pass
1106 elif n in seenbranch:
1105 elif n in seenbranch:
1107 self.ui.debug(_("branch already found\n"))
1106 self.ui.debug(_("branch already found\n"))
1108 continue
1107 continue
1109 elif n[1] and n[1] in m: # do we know the base?
1108 elif n[1] and n[1] in m: # do we know the base?
1110 self.ui.debug(_("found incomplete branch %s:%s\n")
1109 self.ui.debug(_("found incomplete branch %s:%s\n")
1111 % (short(n[0]), short(n[1])))
1110 % (short(n[0]), short(n[1])))
1112 search.append(n) # schedule branch range for scanning
1111 search.append(n) # schedule branch range for scanning
1113 seenbranch[n] = 1
1112 seenbranch[n] = 1
1114 else:
1113 else:
1115 if n[1] not in seen and n[1] not in fetch:
1114 if n[1] not in seen and n[1] not in fetch:
1116 if n[2] in m and n[3] in m:
1115 if n[2] in m and n[3] in m:
1117 self.ui.debug(_("found new changeset %s\n") %
1116 self.ui.debug(_("found new changeset %s\n") %
1118 short(n[1]))
1117 short(n[1]))
1119 fetch[n[1]] = 1 # earliest unknown
1118 fetch[n[1]] = 1 # earliest unknown
1120 for p in n[2:4]:
1119 for p in n[2:4]:
1121 if p in m:
1120 if p in m:
1122 base[p] = 1 # latest known
1121 base[p] = 1 # latest known
1123
1122
1124 for p in n[2:4]:
1123 for p in n[2:4]:
1125 if p not in req and p not in m:
1124 if p not in req and p not in m:
1126 r.append(p)
1125 r.append(p)
1127 req[p] = 1
1126 req[p] = 1
1128 seen[n[0]] = 1
1127 seen[n[0]] = 1
1129
1128
1130 if r:
1129 if r:
1131 reqcnt += 1
1130 reqcnt += 1
1132 self.ui.debug(_("request %d: %s\n") %
1131 self.ui.debug(_("request %d: %s\n") %
1133 (reqcnt, " ".join(map(short, r))))
1132 (reqcnt, " ".join(map(short, r))))
1134 for p in range(0, len(r), 10):
1133 for p in range(0, len(r), 10):
1135 for b in remote.branches(r[p:p+10]):
1134 for b in remote.branches(r[p:p+10]):
1136 self.ui.debug(_("received %s:%s\n") %
1135 self.ui.debug(_("received %s:%s\n") %
1137 (short(b[0]), short(b[1])))
1136 (short(b[0]), short(b[1])))
1138 unknown.append(b)
1137 unknown.append(b)
1139
1138
1140 # do binary search on the branches we found
1139 # do binary search on the branches we found
1141 while search:
1140 while search:
1142 n = search.pop(0)
1141 n = search.pop(0)
1143 reqcnt += 1
1142 reqcnt += 1
1144 l = remote.between([(n[0], n[1])])[0]
1143 l = remote.between([(n[0], n[1])])[0]
1145 l.append(n[1])
1144 l.append(n[1])
1146 p = n[0]
1145 p = n[0]
1147 f = 1
1146 f = 1
1148 for i in l:
1147 for i in l:
1149 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1148 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1150 if i in m:
1149 if i in m:
1151 if f <= 2:
1150 if f <= 2:
1152 self.ui.debug(_("found new branch changeset %s\n") %
1151 self.ui.debug(_("found new branch changeset %s\n") %
1153 short(p))
1152 short(p))
1154 fetch[p] = 1
1153 fetch[p] = 1
1155 base[i] = 1
1154 base[i] = 1
1156 else:
1155 else:
1157 self.ui.debug(_("narrowed branch search to %s:%s\n")
1156 self.ui.debug(_("narrowed branch search to %s:%s\n")
1158 % (short(p), short(i)))
1157 % (short(p), short(i)))
1159 search.append((p, i))
1158 search.append((p, i))
1160 break
1159 break
1161 p, f = i, f * 2
1160 p, f = i, f * 2
1162
1161
1163 # sanity check our fetch list
1162 # sanity check our fetch list
1164 for f in fetch.keys():
1163 for f in fetch.keys():
1165 if f in m:
1164 if f in m:
1166 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1165 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1167
1166
1168 if base.keys() == [nullid]:
1167 if base.keys() == [nullid]:
1169 if force:
1168 if force:
1170 self.ui.warn(_("warning: repository is unrelated\n"))
1169 self.ui.warn(_("warning: repository is unrelated\n"))
1171 else:
1170 else:
1172 raise util.Abort(_("repository is unrelated"))
1171 raise util.Abort(_("repository is unrelated"))
1173
1172
1174 self.ui.debug(_("found new changesets starting at ") +
1173 self.ui.debug(_("found new changesets starting at ") +
1175 " ".join([short(f) for f in fetch]) + "\n")
1174 " ".join([short(f) for f in fetch]) + "\n")
1176
1175
1177 self.ui.debug(_("%d total queries\n") % reqcnt)
1176 self.ui.debug(_("%d total queries\n") % reqcnt)
1178
1177
1179 return fetch.keys()
1178 return fetch.keys()
1180
1179
1181 def findoutgoing(self, remote, base=None, heads=None, force=False):
1180 def findoutgoing(self, remote, base=None, heads=None, force=False):
1182 """Return list of nodes that are roots of subsets not in remote
1181 """Return list of nodes that are roots of subsets not in remote
1183
1182
1184 If base dict is specified, assume that these nodes and their parents
1183 If base dict is specified, assume that these nodes and their parents
1185 exist on the remote side.
1184 exist on the remote side.
1186 If a list of heads is specified, return only nodes which are heads
1185 If a list of heads is specified, return only nodes which are heads
1187 or ancestors of these heads, and return a second element which
1186 or ancestors of these heads, and return a second element which
1188 contains all remote heads which get new children.
1187 contains all remote heads which get new children.
1189 """
1188 """
1190 if base == None:
1189 if base == None:
1191 base = {}
1190 base = {}
1192 self.findincoming(remote, base, heads, force=force)
1191 self.findincoming(remote, base, heads, force=force)
1193
1192
1194 self.ui.debug(_("common changesets up to ")
1193 self.ui.debug(_("common changesets up to ")
1195 + " ".join(map(short, base.keys())) + "\n")
1194 + " ".join(map(short, base.keys())) + "\n")
1196
1195
1197 remain = dict.fromkeys(self.changelog.nodemap)
1196 remain = dict.fromkeys(self.changelog.nodemap)
1198
1197
1199 # prune everything remote has from the tree
1198 # prune everything remote has from the tree
1200 del remain[nullid]
1199 del remain[nullid]
1201 remove = base.keys()
1200 remove = base.keys()
1202 while remove:
1201 while remove:
1203 n = remove.pop(0)
1202 n = remove.pop(0)
1204 if n in remain:
1203 if n in remain:
1205 del remain[n]
1204 del remain[n]
1206 for p in self.changelog.parents(n):
1205 for p in self.changelog.parents(n):
1207 remove.append(p)
1206 remove.append(p)
1208
1207
1209 # find every node whose parents have been pruned
1208 # find every node whose parents have been pruned
1210 subset = []
1209 subset = []
1211 # find every remote head that will get new children
1210 # find every remote head that will get new children
1212 updated_heads = {}
1211 updated_heads = {}
1213 for n in remain:
1212 for n in remain:
1214 p1, p2 = self.changelog.parents(n)
1213 p1, p2 = self.changelog.parents(n)
1215 if p1 not in remain and p2 not in remain:
1214 if p1 not in remain and p2 not in remain:
1216 subset.append(n)
1215 subset.append(n)
1217 if heads:
1216 if heads:
1218 if p1 in heads:
1217 if p1 in heads:
1219 updated_heads[p1] = True
1218 updated_heads[p1] = True
1220 if p2 in heads:
1219 if p2 in heads:
1221 updated_heads[p2] = True
1220 updated_heads[p2] = True
1222
1221
1223 # this is the set of all roots we have to push
1222 # this is the set of all roots we have to push
1224 if heads:
1223 if heads:
1225 return subset, updated_heads.keys()
1224 return subset, updated_heads.keys()
1226 else:
1225 else:
1227 return subset
1226 return subset
1228
1227
1229 def pull(self, remote, heads=None, force=False, lock=None):
1228 def pull(self, remote, heads=None, force=False, lock=None):
1230 mylock = False
1229 mylock = False
1231 if not lock:
1230 if not lock:
1232 lock = self.lock()
1231 lock = self.lock()
1233 mylock = True
1232 mylock = True
1234
1233
1235 try:
1234 try:
1236 fetch = self.findincoming(remote, force=force)
1235 fetch = self.findincoming(remote, force=force)
1237 if fetch == [nullid]:
1236 if fetch == [nullid]:
1238 self.ui.status(_("requesting all changes\n"))
1237 self.ui.status(_("requesting all changes\n"))
1239
1238
1240 if not fetch:
1239 if not fetch:
1241 self.ui.status(_("no changes found\n"))
1240 self.ui.status(_("no changes found\n"))
1242 return 0
1241 return 0
1243
1242
1244 if heads is None:
1243 if heads is None:
1245 cg = remote.changegroup(fetch, 'pull')
1244 cg = remote.changegroup(fetch, 'pull')
1246 else:
1245 else:
1247 cg = remote.changegroupsubset(fetch, heads, 'pull')
1246 cg = remote.changegroupsubset(fetch, heads, 'pull')
1248 return self.addchangegroup(cg, 'pull', remote.url())
1247 return self.addchangegroup(cg, 'pull', remote.url())
1249 finally:
1248 finally:
1250 if mylock:
1249 if mylock:
1251 lock.release()
1250 lock.release()
1252
1251
1253 def push(self, remote, force=False, revs=None):
1252 def push(self, remote, force=False, revs=None):
1254 # there are two ways to push to remote repo:
1253 # there are two ways to push to remote repo:
1255 #
1254 #
1256 # addchangegroup assumes local user can lock remote
1255 # addchangegroup assumes local user can lock remote
1257 # repo (local filesystem, old ssh servers).
1256 # repo (local filesystem, old ssh servers).
1258 #
1257 #
1259 # unbundle assumes local user cannot lock remote repo (new ssh
1258 # unbundle assumes local user cannot lock remote repo (new ssh
1260 # servers, http servers).
1259 # servers, http servers).
1261
1260
1262 if remote.capable('unbundle'):
1261 if remote.capable('unbundle'):
1263 return self.push_unbundle(remote, force, revs)
1262 return self.push_unbundle(remote, force, revs)
1264 return self.push_addchangegroup(remote, force, revs)
1263 return self.push_addchangegroup(remote, force, revs)
1265
1264
1266 def prepush(self, remote, force, revs):
1265 def prepush(self, remote, force, revs):
1267 base = {}
1266 base = {}
1268 remote_heads = remote.heads()
1267 remote_heads = remote.heads()
1269 inc = self.findincoming(remote, base, remote_heads, force=force)
1268 inc = self.findincoming(remote, base, remote_heads, force=force)
1270 if not force and inc:
1269 if not force and inc:
1271 self.ui.warn(_("abort: unsynced remote changes!\n"))
1270 self.ui.warn(_("abort: unsynced remote changes!\n"))
1272 self.ui.status(_("(did you forget to sync?"
1271 self.ui.status(_("(did you forget to sync?"
1273 " use push -f to force)\n"))
1272 " use push -f to force)\n"))
1274 return None, 1
1273 return None, 1
1275
1274
1276 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1275 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1277 if revs is not None:
1276 if revs is not None:
1278 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1277 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1279 else:
1278 else:
1280 bases, heads = update, self.changelog.heads()
1279 bases, heads = update, self.changelog.heads()
1281
1280
1282 if not bases:
1281 if not bases:
1283 self.ui.status(_("no changes found\n"))
1282 self.ui.status(_("no changes found\n"))
1284 return None, 1
1283 return None, 1
1285 elif not force:
1284 elif not force:
1286 # FIXME we don't properly detect creation of new heads
1285 # FIXME we don't properly detect creation of new heads
1287 # in the push -r case, assume the user knows what he's doing
1286 # in the push -r case, assume the user knows what he's doing
1288 if not revs and len(remote_heads) < len(heads) \
1287 if not revs and len(remote_heads) < len(heads) \
1289 and remote_heads != [nullid]:
1288 and remote_heads != [nullid]:
1290 self.ui.warn(_("abort: push creates new remote branches!\n"))
1289 self.ui.warn(_("abort: push creates new remote branches!\n"))
1291 self.ui.status(_("(did you forget to merge?"
1290 self.ui.status(_("(did you forget to merge?"
1292 " use push -f to force)\n"))
1291 " use push -f to force)\n"))
1293 return None, 1
1292 return None, 1
1294
1293
1295 if revs is None:
1294 if revs is None:
1296 cg = self.changegroup(update, 'push')
1295 cg = self.changegroup(update, 'push')
1297 else:
1296 else:
1298 cg = self.changegroupsubset(update, revs, 'push')
1297 cg = self.changegroupsubset(update, revs, 'push')
1299 return cg, remote_heads
1298 return cg, remote_heads
1300
1299
1301 def push_addchangegroup(self, remote, force, revs):
1300 def push_addchangegroup(self, remote, force, revs):
1302 lock = remote.lock()
1301 lock = remote.lock()
1303
1302
1304 ret = self.prepush(remote, force, revs)
1303 ret = self.prepush(remote, force, revs)
1305 if ret[0] is not None:
1304 if ret[0] is not None:
1306 cg, remote_heads = ret
1305 cg, remote_heads = ret
1307 return remote.addchangegroup(cg, 'push', self.url())
1306 return remote.addchangegroup(cg, 'push', self.url())
1308 return ret[1]
1307 return ret[1]
1309
1308
1310 def push_unbundle(self, remote, force, revs):
1309 def push_unbundle(self, remote, force, revs):
1311 # local repo finds heads on server, finds out what revs it
1310 # local repo finds heads on server, finds out what revs it
1312 # must push. once revs transferred, if server finds it has
1311 # must push. once revs transferred, if server finds it has
1313 # different heads (someone else won commit/push race), server
1312 # different heads (someone else won commit/push race), server
1314 # aborts.
1313 # aborts.
1315
1314
1316 ret = self.prepush(remote, force, revs)
1315 ret = self.prepush(remote, force, revs)
1317 if ret[0] is not None:
1316 if ret[0] is not None:
1318 cg, remote_heads = ret
1317 cg, remote_heads = ret
1319 if force: remote_heads = ['force']
1318 if force: remote_heads = ['force']
1320 return remote.unbundle(cg, remote_heads, 'push')
1319 return remote.unbundle(cg, remote_heads, 'push')
1321 return ret[1]
1320 return ret[1]
1322
1321
1323 def changegroupsubset(self, bases, heads, source):
1322 def changegroupsubset(self, bases, heads, source):
1324 """This function generates a changegroup consisting of all the nodes
1323 """This function generates a changegroup consisting of all the nodes
1325 that are descendents of any of the bases, and ancestors of any of
1324 that are descendents of any of the bases, and ancestors of any of
1326 the heads.
1325 the heads.
1327
1326
1328 It is fairly complex as determining which filenodes and which
1327 It is fairly complex as determining which filenodes and which
1329 manifest nodes need to be included for the changeset to be complete
1328 manifest nodes need to be included for the changeset to be complete
1330 is non-trivial.
1329 is non-trivial.
1331
1330
1332 Another wrinkle is doing the reverse, figuring out which changeset in
1331 Another wrinkle is doing the reverse, figuring out which changeset in
1333 the changegroup a particular filenode or manifestnode belongs to."""
1332 the changegroup a particular filenode or manifestnode belongs to."""
1334
1333
1335 self.hook('preoutgoing', throw=True, source=source)
1334 self.hook('preoutgoing', throw=True, source=source)
1336
1335
1337 # Set up some initial variables
1336 # Set up some initial variables
1338 # Make it easy to refer to self.changelog
1337 # Make it easy to refer to self.changelog
1339 cl = self.changelog
1338 cl = self.changelog
1340 # msng is short for missing - compute the list of changesets in this
1339 # msng is short for missing - compute the list of changesets in this
1341 # changegroup.
1340 # changegroup.
1342 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1341 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1343 # Some bases may turn out to be superfluous, and some heads may be
1342 # Some bases may turn out to be superfluous, and some heads may be
1344 # too. nodesbetween will return the minimal set of bases and heads
1343 # too. nodesbetween will return the minimal set of bases and heads
1345 # necessary to re-create the changegroup.
1344 # necessary to re-create the changegroup.
1346
1345
1347 # Known heads are the list of heads that it is assumed the recipient
1346 # Known heads are the list of heads that it is assumed the recipient
1348 # of this changegroup will know about.
1347 # of this changegroup will know about.
1349 knownheads = {}
1348 knownheads = {}
1350 # We assume that all parents of bases are known heads.
1349 # We assume that all parents of bases are known heads.
1351 for n in bases:
1350 for n in bases:
1352 for p in cl.parents(n):
1351 for p in cl.parents(n):
1353 if p != nullid:
1352 if p != nullid:
1354 knownheads[p] = 1
1353 knownheads[p] = 1
1355 knownheads = knownheads.keys()
1354 knownheads = knownheads.keys()
1356 if knownheads:
1355 if knownheads:
1357 # Now that we know what heads are known, we can compute which
1356 # Now that we know what heads are known, we can compute which
1358 # changesets are known. The recipient must know about all
1357 # changesets are known. The recipient must know about all
1359 # changesets required to reach the known heads from the null
1358 # changesets required to reach the known heads from the null
1360 # changeset.
1359 # changeset.
1361 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1360 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1362 junk = None
1361 junk = None
1363 # Transform the list into an ersatz set.
1362 # Transform the list into an ersatz set.
1364 has_cl_set = dict.fromkeys(has_cl_set)
1363 has_cl_set = dict.fromkeys(has_cl_set)
1365 else:
1364 else:
1366 # If there were no known heads, the recipient cannot be assumed to
1365 # If there were no known heads, the recipient cannot be assumed to
1367 # know about any changesets.
1366 # know about any changesets.
1368 has_cl_set = {}
1367 has_cl_set = {}
1369
1368
1370 # Make it easy to refer to self.manifest
1369 # Make it easy to refer to self.manifest
1371 mnfst = self.manifest
1370 mnfst = self.manifest
1372 # We don't know which manifests are missing yet
1371 # We don't know which manifests are missing yet
1373 msng_mnfst_set = {}
1372 msng_mnfst_set = {}
1374 # Nor do we know which filenodes are missing.
1373 # Nor do we know which filenodes are missing.
1375 msng_filenode_set = {}
1374 msng_filenode_set = {}
1376
1375
1377 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1376 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1378 junk = None
1377 junk = None
1379
1378
1380 # A changeset always belongs to itself, so the changenode lookup
1379 # A changeset always belongs to itself, so the changenode lookup
1381 # function for a changenode is identity.
1380 # function for a changenode is identity.
1382 def identity(x):
1381 def identity(x):
1383 return x
1382 return x
1384
1383
1385 # A function generating function. Sets up an environment for the
1384 # A function generating function. Sets up an environment for the
1386 # inner function.
1385 # inner function.
1387 def cmp_by_rev_func(revlog):
1386 def cmp_by_rev_func(revlog):
1388 # Compare two nodes by their revision number in the environment's
1387 # Compare two nodes by their revision number in the environment's
1389 # revision history. Since the revision number both represents the
1388 # revision history. Since the revision number both represents the
1390 # most efficient order to read the nodes in, and represents a
1389 # most efficient order to read the nodes in, and represents a
1391 # topological sorting of the nodes, this function is often useful.
1390 # topological sorting of the nodes, this function is often useful.
1392 def cmp_by_rev(a, b):
1391 def cmp_by_rev(a, b):
1393 return cmp(revlog.rev(a), revlog.rev(b))
1392 return cmp(revlog.rev(a), revlog.rev(b))
1394 return cmp_by_rev
1393 return cmp_by_rev
1395
1394
1396 # If we determine that a particular file or manifest node must be a
1395 # If we determine that a particular file or manifest node must be a
1397 # node that the recipient of the changegroup will already have, we can
1396 # node that the recipient of the changegroup will already have, we can
1398 # also assume the recipient will have all the parents. This function
1397 # also assume the recipient will have all the parents. This function
1399 # prunes them from the set of missing nodes.
1398 # prunes them from the set of missing nodes.
1400 def prune_parents(revlog, hasset, msngset):
1399 def prune_parents(revlog, hasset, msngset):
1401 haslst = hasset.keys()
1400 haslst = hasset.keys()
1402 haslst.sort(cmp_by_rev_func(revlog))
1401 haslst.sort(cmp_by_rev_func(revlog))
1403 for node in haslst:
1402 for node in haslst:
1404 parentlst = [p for p in revlog.parents(node) if p != nullid]
1403 parentlst = [p for p in revlog.parents(node) if p != nullid]
1405 while parentlst:
1404 while parentlst:
1406 n = parentlst.pop()
1405 n = parentlst.pop()
1407 if n not in hasset:
1406 if n not in hasset:
1408 hasset[n] = 1
1407 hasset[n] = 1
1409 p = [p for p in revlog.parents(n) if p != nullid]
1408 p = [p for p in revlog.parents(n) if p != nullid]
1410 parentlst.extend(p)
1409 parentlst.extend(p)
1411 for n in hasset:
1410 for n in hasset:
1412 msngset.pop(n, None)
1411 msngset.pop(n, None)
1413
1412
1414 # This is a function generating function used to set up an environment
1413 # This is a function generating function used to set up an environment
1415 # for the inner function to execute in.
1414 # for the inner function to execute in.
1416 def manifest_and_file_collector(changedfileset):
1415 def manifest_and_file_collector(changedfileset):
1417 # This is an information gathering function that gathers
1416 # This is an information gathering function that gathers
1418 # information from each changeset node that goes out as part of
1417 # information from each changeset node that goes out as part of
1419 # the changegroup. The information gathered is a list of which
1418 # the changegroup. The information gathered is a list of which
1420 # manifest nodes are potentially required (the recipient may
1419 # manifest nodes are potentially required (the recipient may
1421 # already have them) and total list of all files which were
1420 # already have them) and total list of all files which were
1422 # changed in any changeset in the changegroup.
1421 # changed in any changeset in the changegroup.
1423 #
1422 #
1424 # We also remember the first changenode we saw any manifest
1423 # We also remember the first changenode we saw any manifest
1425 # referenced by so we can later determine which changenode 'owns'
1424 # referenced by so we can later determine which changenode 'owns'
1426 # the manifest.
1425 # the manifest.
1427 def collect_manifests_and_files(clnode):
1426 def collect_manifests_and_files(clnode):
1428 c = cl.read(clnode)
1427 c = cl.read(clnode)
1429 for f in c[3]:
1428 for f in c[3]:
1430 # This is to make sure we only have one instance of each
1429 # This is to make sure we only have one instance of each
1431 # filename string for each filename.
1430 # filename string for each filename.
1432 changedfileset.setdefault(f, f)
1431 changedfileset.setdefault(f, f)
1433 msng_mnfst_set.setdefault(c[0], clnode)
1432 msng_mnfst_set.setdefault(c[0], clnode)
1434 return collect_manifests_and_files
1433 return collect_manifests_and_files
1435
1434
1436 # Figure out which manifest nodes (of the ones we think might be part
1435 # Figure out which manifest nodes (of the ones we think might be part
1437 # of the changegroup) the recipient must know about and remove them
1436 # of the changegroup) the recipient must know about and remove them
1438 # from the changegroup.
1437 # from the changegroup.
1439 def prune_manifests():
1438 def prune_manifests():
1440 has_mnfst_set = {}
1439 has_mnfst_set = {}
1441 for n in msng_mnfst_set:
1440 for n in msng_mnfst_set:
1442 # If a 'missing' manifest thinks it belongs to a changenode
1441 # If a 'missing' manifest thinks it belongs to a changenode
1443 # the recipient is assumed to have, obviously the recipient
1442 # the recipient is assumed to have, obviously the recipient
1444 # must have that manifest.
1443 # must have that manifest.
1445 linknode = cl.node(mnfst.linkrev(n))
1444 linknode = cl.node(mnfst.linkrev(n))
1446 if linknode in has_cl_set:
1445 if linknode in has_cl_set:
1447 has_mnfst_set[n] = 1
1446 has_mnfst_set[n] = 1
1448 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1447 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1449
1448
1450 # Use the information collected in collect_manifests_and_files to say
1449 # Use the information collected in collect_manifests_and_files to say
1451 # which changenode any manifestnode belongs to.
1450 # which changenode any manifestnode belongs to.
1452 def lookup_manifest_link(mnfstnode):
1451 def lookup_manifest_link(mnfstnode):
1453 return msng_mnfst_set[mnfstnode]
1452 return msng_mnfst_set[mnfstnode]
1454
1453
1455 # A function generating function that sets up the initial environment
1454 # A function generating function that sets up the initial environment
1456 # the inner function.
1455 # the inner function.
1457 def filenode_collector(changedfiles):
1456 def filenode_collector(changedfiles):
1458 next_rev = [0]
1457 next_rev = [0]
1459 # This gathers information from each manifestnode included in the
1458 # This gathers information from each manifestnode included in the
1460 # changegroup about which filenodes the manifest node references
1459 # changegroup about which filenodes the manifest node references
1461 # so we can include those in the changegroup too.
1460 # so we can include those in the changegroup too.
1462 #
1461 #
1463 # It also remembers which changenode each filenode belongs to. It
1462 # It also remembers which changenode each filenode belongs to. It
1464 # does this by assuming the a filenode belongs to the changenode
1463 # does this by assuming the a filenode belongs to the changenode
1465 # the first manifest that references it belongs to.
1464 # the first manifest that references it belongs to.
1466 def collect_msng_filenodes(mnfstnode):
1465 def collect_msng_filenodes(mnfstnode):
1467 r = mnfst.rev(mnfstnode)
1466 r = mnfst.rev(mnfstnode)
1468 if r == next_rev[0]:
1467 if r == next_rev[0]:
1469 # If the last rev we looked at was the one just previous,
1468 # If the last rev we looked at was the one just previous,
1470 # we only need to see a diff.
1469 # we only need to see a diff.
1471 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1470 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1472 # For each line in the delta
1471 # For each line in the delta
1473 for dline in delta.splitlines():
1472 for dline in delta.splitlines():
1474 # get the filename and filenode for that line
1473 # get the filename and filenode for that line
1475 f, fnode = dline.split('\0')
1474 f, fnode = dline.split('\0')
1476 fnode = bin(fnode[:40])
1475 fnode = bin(fnode[:40])
1477 f = changedfiles.get(f, None)
1476 f = changedfiles.get(f, None)
1478 # And if the file is in the list of files we care
1477 # And if the file is in the list of files we care
1479 # about.
1478 # about.
1480 if f is not None:
1479 if f is not None:
1481 # Get the changenode this manifest belongs to
1480 # Get the changenode this manifest belongs to
1482 clnode = msng_mnfst_set[mnfstnode]
1481 clnode = msng_mnfst_set[mnfstnode]
1483 # Create the set of filenodes for the file if
1482 # Create the set of filenodes for the file if
1484 # there isn't one already.
1483 # there isn't one already.
1485 ndset = msng_filenode_set.setdefault(f, {})
1484 ndset = msng_filenode_set.setdefault(f, {})
1486 # And set the filenode's changelog node to the
1485 # And set the filenode's changelog node to the
1487 # manifest's if it hasn't been set already.
1486 # manifest's if it hasn't been set already.
1488 ndset.setdefault(fnode, clnode)
1487 ndset.setdefault(fnode, clnode)
1489 else:
1488 else:
1490 # Otherwise we need a full manifest.
1489 # Otherwise we need a full manifest.
1491 m = mnfst.read(mnfstnode)
1490 m = mnfst.read(mnfstnode)
1492 # For every file in we care about.
1491 # For every file in we care about.
1493 for f in changedfiles:
1492 for f in changedfiles:
1494 fnode = m.get(f, None)
1493 fnode = m.get(f, None)
1495 # If it's in the manifest
1494 # If it's in the manifest
1496 if fnode is not None:
1495 if fnode is not None:
1497 # See comments above.
1496 # See comments above.
1498 clnode = msng_mnfst_set[mnfstnode]
1497 clnode = msng_mnfst_set[mnfstnode]
1499 ndset = msng_filenode_set.setdefault(f, {})
1498 ndset = msng_filenode_set.setdefault(f, {})
1500 ndset.setdefault(fnode, clnode)
1499 ndset.setdefault(fnode, clnode)
1501 # Remember the revision we hope to see next.
1500 # Remember the revision we hope to see next.
1502 next_rev[0] = r + 1
1501 next_rev[0] = r + 1
1503 return collect_msng_filenodes
1502 return collect_msng_filenodes
1504
1503
1505 # We have a list of filenodes we think we need for a file, lets remove
1504 # We have a list of filenodes we think we need for a file, lets remove
1506 # all those we now the recipient must have.
1505 # all those we now the recipient must have.
1507 def prune_filenodes(f, filerevlog):
1506 def prune_filenodes(f, filerevlog):
1508 msngset = msng_filenode_set[f]
1507 msngset = msng_filenode_set[f]
1509 hasset = {}
1508 hasset = {}
1510 # If a 'missing' filenode thinks it belongs to a changenode we
1509 # If a 'missing' filenode thinks it belongs to a changenode we
1511 # assume the recipient must have, then the recipient must have
1510 # assume the recipient must have, then the recipient must have
1512 # that filenode.
1511 # that filenode.
1513 for n in msngset:
1512 for n in msngset:
1514 clnode = cl.node(filerevlog.linkrev(n))
1513 clnode = cl.node(filerevlog.linkrev(n))
1515 if clnode in has_cl_set:
1514 if clnode in has_cl_set:
1516 hasset[n] = 1
1515 hasset[n] = 1
1517 prune_parents(filerevlog, hasset, msngset)
1516 prune_parents(filerevlog, hasset, msngset)
1518
1517
1519 # A function generator function that sets up the a context for the
1518 # A function generator function that sets up the a context for the
1520 # inner function.
1519 # inner function.
1521 def lookup_filenode_link_func(fname):
1520 def lookup_filenode_link_func(fname):
1522 msngset = msng_filenode_set[fname]
1521 msngset = msng_filenode_set[fname]
1523 # Lookup the changenode the filenode belongs to.
1522 # Lookup the changenode the filenode belongs to.
1524 def lookup_filenode_link(fnode):
1523 def lookup_filenode_link(fnode):
1525 return msngset[fnode]
1524 return msngset[fnode]
1526 return lookup_filenode_link
1525 return lookup_filenode_link
1527
1526
1528 # Now that we have all theses utility functions to help out and
1527 # Now that we have all theses utility functions to help out and
1529 # logically divide up the task, generate the group.
1528 # logically divide up the task, generate the group.
1530 def gengroup():
1529 def gengroup():
1531 # The set of changed files starts empty.
1530 # The set of changed files starts empty.
1532 changedfiles = {}
1531 changedfiles = {}
1533 # Create a changenode group generator that will call our functions
1532 # Create a changenode group generator that will call our functions
1534 # back to lookup the owning changenode and collect information.
1533 # back to lookup the owning changenode and collect information.
1535 group = cl.group(msng_cl_lst, identity,
1534 group = cl.group(msng_cl_lst, identity,
1536 manifest_and_file_collector(changedfiles))
1535 manifest_and_file_collector(changedfiles))
1537 for chnk in group:
1536 for chnk in group:
1538 yield chnk
1537 yield chnk
1539
1538
1540 # The list of manifests has been collected by the generator
1539 # The list of manifests has been collected by the generator
1541 # calling our functions back.
1540 # calling our functions back.
1542 prune_manifests()
1541 prune_manifests()
1543 msng_mnfst_lst = msng_mnfst_set.keys()
1542 msng_mnfst_lst = msng_mnfst_set.keys()
1544 # Sort the manifestnodes by revision number.
1543 # Sort the manifestnodes by revision number.
1545 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1544 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1546 # Create a generator for the manifestnodes that calls our lookup
1545 # Create a generator for the manifestnodes that calls our lookup
1547 # and data collection functions back.
1546 # and data collection functions back.
1548 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1547 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1549 filenode_collector(changedfiles))
1548 filenode_collector(changedfiles))
1550 for chnk in group:
1549 for chnk in group:
1551 yield chnk
1550 yield chnk
1552
1551
1553 # These are no longer needed, dereference and toss the memory for
1552 # These are no longer needed, dereference and toss the memory for
1554 # them.
1553 # them.
1555 msng_mnfst_lst = None
1554 msng_mnfst_lst = None
1556 msng_mnfst_set.clear()
1555 msng_mnfst_set.clear()
1557
1556
1558 changedfiles = changedfiles.keys()
1557 changedfiles = changedfiles.keys()
1559 changedfiles.sort()
1558 changedfiles.sort()
1560 # Go through all our files in order sorted by name.
1559 # Go through all our files in order sorted by name.
1561 for fname in changedfiles:
1560 for fname in changedfiles:
1562 filerevlog = self.file(fname)
1561 filerevlog = self.file(fname)
1563 # Toss out the filenodes that the recipient isn't really
1562 # Toss out the filenodes that the recipient isn't really
1564 # missing.
1563 # missing.
1565 if msng_filenode_set.has_key(fname):
1564 if msng_filenode_set.has_key(fname):
1566 prune_filenodes(fname, filerevlog)
1565 prune_filenodes(fname, filerevlog)
1567 msng_filenode_lst = msng_filenode_set[fname].keys()
1566 msng_filenode_lst = msng_filenode_set[fname].keys()
1568 else:
1567 else:
1569 msng_filenode_lst = []
1568 msng_filenode_lst = []
1570 # If any filenodes are left, generate the group for them,
1569 # If any filenodes are left, generate the group for them,
1571 # otherwise don't bother.
1570 # otherwise don't bother.
1572 if len(msng_filenode_lst) > 0:
1571 if len(msng_filenode_lst) > 0:
1573 yield changegroup.genchunk(fname)
1572 yield changegroup.genchunk(fname)
1574 # Sort the filenodes by their revision #
1573 # Sort the filenodes by their revision #
1575 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1574 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1576 # Create a group generator and only pass in a changenode
1575 # Create a group generator and only pass in a changenode
1577 # lookup function as we need to collect no information
1576 # lookup function as we need to collect no information
1578 # from filenodes.
1577 # from filenodes.
1579 group = filerevlog.group(msng_filenode_lst,
1578 group = filerevlog.group(msng_filenode_lst,
1580 lookup_filenode_link_func(fname))
1579 lookup_filenode_link_func(fname))
1581 for chnk in group:
1580 for chnk in group:
1582 yield chnk
1581 yield chnk
1583 if msng_filenode_set.has_key(fname):
1582 if msng_filenode_set.has_key(fname):
1584 # Don't need this anymore, toss it to free memory.
1583 # Don't need this anymore, toss it to free memory.
1585 del msng_filenode_set[fname]
1584 del msng_filenode_set[fname]
1586 # Signal that no more groups are left.
1585 # Signal that no more groups are left.
1587 yield changegroup.closechunk()
1586 yield changegroup.closechunk()
1588
1587
1589 if msng_cl_lst:
1588 if msng_cl_lst:
1590 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1589 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1591
1590
1592 return util.chunkbuffer(gengroup())
1591 return util.chunkbuffer(gengroup())
1593
1592
1594 def changegroup(self, basenodes, source):
1593 def changegroup(self, basenodes, source):
1595 """Generate a changegroup of all nodes that we have that a recipient
1594 """Generate a changegroup of all nodes that we have that a recipient
1596 doesn't.
1595 doesn't.
1597
1596
1598 This is much easier than the previous function as we can assume that
1597 This is much easier than the previous function as we can assume that
1599 the recipient has any changenode we aren't sending them."""
1598 the recipient has any changenode we aren't sending them."""
1600
1599
1601 self.hook('preoutgoing', throw=True, source=source)
1600 self.hook('preoutgoing', throw=True, source=source)
1602
1601
1603 cl = self.changelog
1602 cl = self.changelog
1604 nodes = cl.nodesbetween(basenodes, None)[0]
1603 nodes = cl.nodesbetween(basenodes, None)[0]
1605 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1604 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1606
1605
1607 def identity(x):
1606 def identity(x):
1608 return x
1607 return x
1609
1608
1610 def gennodelst(revlog):
1609 def gennodelst(revlog):
1611 for r in xrange(0, revlog.count()):
1610 for r in xrange(0, revlog.count()):
1612 n = revlog.node(r)
1611 n = revlog.node(r)
1613 if revlog.linkrev(n) in revset:
1612 if revlog.linkrev(n) in revset:
1614 yield n
1613 yield n
1615
1614
1616 def changed_file_collector(changedfileset):
1615 def changed_file_collector(changedfileset):
1617 def collect_changed_files(clnode):
1616 def collect_changed_files(clnode):
1618 c = cl.read(clnode)
1617 c = cl.read(clnode)
1619 for fname in c[3]:
1618 for fname in c[3]:
1620 changedfileset[fname] = 1
1619 changedfileset[fname] = 1
1621 return collect_changed_files
1620 return collect_changed_files
1622
1621
1623 def lookuprevlink_func(revlog):
1622 def lookuprevlink_func(revlog):
1624 def lookuprevlink(n):
1623 def lookuprevlink(n):
1625 return cl.node(revlog.linkrev(n))
1624 return cl.node(revlog.linkrev(n))
1626 return lookuprevlink
1625 return lookuprevlink
1627
1626
1628 def gengroup():
1627 def gengroup():
1629 # construct a list of all changed files
1628 # construct a list of all changed files
1630 changedfiles = {}
1629 changedfiles = {}
1631
1630
1632 for chnk in cl.group(nodes, identity,
1631 for chnk in cl.group(nodes, identity,
1633 changed_file_collector(changedfiles)):
1632 changed_file_collector(changedfiles)):
1634 yield chnk
1633 yield chnk
1635 changedfiles = changedfiles.keys()
1634 changedfiles = changedfiles.keys()
1636 changedfiles.sort()
1635 changedfiles.sort()
1637
1636
1638 mnfst = self.manifest
1637 mnfst = self.manifest
1639 nodeiter = gennodelst(mnfst)
1638 nodeiter = gennodelst(mnfst)
1640 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1639 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1641 yield chnk
1640 yield chnk
1642
1641
1643 for fname in changedfiles:
1642 for fname in changedfiles:
1644 filerevlog = self.file(fname)
1643 filerevlog = self.file(fname)
1645 nodeiter = gennodelst(filerevlog)
1644 nodeiter = gennodelst(filerevlog)
1646 nodeiter = list(nodeiter)
1645 nodeiter = list(nodeiter)
1647 if nodeiter:
1646 if nodeiter:
1648 yield changegroup.genchunk(fname)
1647 yield changegroup.genchunk(fname)
1649 lookup = lookuprevlink_func(filerevlog)
1648 lookup = lookuprevlink_func(filerevlog)
1650 for chnk in filerevlog.group(nodeiter, lookup):
1649 for chnk in filerevlog.group(nodeiter, lookup):
1651 yield chnk
1650 yield chnk
1652
1651
1653 yield changegroup.closechunk()
1652 yield changegroup.closechunk()
1654
1653
1655 if nodes:
1654 if nodes:
1656 self.hook('outgoing', node=hex(nodes[0]), source=source)
1655 self.hook('outgoing', node=hex(nodes[0]), source=source)
1657
1656
1658 return util.chunkbuffer(gengroup())
1657 return util.chunkbuffer(gengroup())
1659
1658
1660 def addchangegroup(self, source, srctype, url):
1659 def addchangegroup(self, source, srctype, url):
1661 """add changegroup to repo.
1660 """add changegroup to repo.
1662 returns number of heads modified or added + 1."""
1661 returns number of heads modified or added + 1."""
1663
1662
1664 def csmap(x):
1663 def csmap(x):
1665 self.ui.debug(_("add changeset %s\n") % short(x))
1664 self.ui.debug(_("add changeset %s\n") % short(x))
1666 return cl.count()
1665 return cl.count()
1667
1666
1668 def revmap(x):
1667 def revmap(x):
1669 return cl.rev(x)
1668 return cl.rev(x)
1670
1669
1671 if not source:
1670 if not source:
1672 return 0
1671 return 0
1673
1672
1674 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1673 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1675
1674
1676 changesets = files = revisions = 0
1675 changesets = files = revisions = 0
1677
1676
1678 tr = self.transaction()
1677 tr = self.transaction()
1679
1678
1680 # write changelog data to temp files so concurrent readers will not see
1679 # write changelog data to temp files so concurrent readers will not see
1681 # inconsistent view
1680 # inconsistent view
1682 cl = None
1681 cl = None
1683 try:
1682 try:
1684 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1683 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1685
1684
1686 oldheads = len(cl.heads())
1685 oldheads = len(cl.heads())
1687
1686
1688 # pull off the changeset group
1687 # pull off the changeset group
1689 self.ui.status(_("adding changesets\n"))
1688 self.ui.status(_("adding changesets\n"))
1690 cor = cl.count() - 1
1689 cor = cl.count() - 1
1691 chunkiter = changegroup.chunkiter(source)
1690 chunkiter = changegroup.chunkiter(source)
1692 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1691 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1693 raise util.Abort(_("received changelog group is empty"))
1692 raise util.Abort(_("received changelog group is empty"))
1694 cnr = cl.count() - 1
1693 cnr = cl.count() - 1
1695 changesets = cnr - cor
1694 changesets = cnr - cor
1696
1695
1697 # pull off the manifest group
1696 # pull off the manifest group
1698 self.ui.status(_("adding manifests\n"))
1697 self.ui.status(_("adding manifests\n"))
1699 chunkiter = changegroup.chunkiter(source)
1698 chunkiter = changegroup.chunkiter(source)
1700 # no need to check for empty manifest group here:
1699 # no need to check for empty manifest group here:
1701 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1700 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1702 # no new manifest will be created and the manifest group will
1701 # no new manifest will be created and the manifest group will
1703 # be empty during the pull
1702 # be empty during the pull
1704 self.manifest.addgroup(chunkiter, revmap, tr)
1703 self.manifest.addgroup(chunkiter, revmap, tr)
1705
1704
1706 # process the files
1705 # process the files
1707 self.ui.status(_("adding file changes\n"))
1706 self.ui.status(_("adding file changes\n"))
1708 while 1:
1707 while 1:
1709 f = changegroup.getchunk(source)
1708 f = changegroup.getchunk(source)
1710 if not f:
1709 if not f:
1711 break
1710 break
1712 self.ui.debug(_("adding %s revisions\n") % f)
1711 self.ui.debug(_("adding %s revisions\n") % f)
1713 fl = self.file(f)
1712 fl = self.file(f)
1714 o = fl.count()
1713 o = fl.count()
1715 chunkiter = changegroup.chunkiter(source)
1714 chunkiter = changegroup.chunkiter(source)
1716 if fl.addgroup(chunkiter, revmap, tr) is None:
1715 if fl.addgroup(chunkiter, revmap, tr) is None:
1717 raise util.Abort(_("received file revlog group is empty"))
1716 raise util.Abort(_("received file revlog group is empty"))
1718 revisions += fl.count() - o
1717 revisions += fl.count() - o
1719 files += 1
1718 files += 1
1720
1719
1721 cl.writedata()
1720 cl.writedata()
1722 finally:
1721 finally:
1723 if cl:
1722 if cl:
1724 cl.cleanup()
1723 cl.cleanup()
1725
1724
1726 # make changelog see real files again
1725 # make changelog see real files again
1727 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1726 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1728 self.changelog.checkinlinesize(tr)
1727 self.changelog.checkinlinesize(tr)
1729
1728
1730 newheads = len(self.changelog.heads())
1729 newheads = len(self.changelog.heads())
1731 heads = ""
1730 heads = ""
1732 if oldheads and newheads != oldheads:
1731 if oldheads and newheads != oldheads:
1733 heads = _(" (%+d heads)") % (newheads - oldheads)
1732 heads = _(" (%+d heads)") % (newheads - oldheads)
1734
1733
1735 self.ui.status(_("added %d changesets"
1734 self.ui.status(_("added %d changesets"
1736 " with %d changes to %d files%s\n")
1735 " with %d changes to %d files%s\n")
1737 % (changesets, revisions, files, heads))
1736 % (changesets, revisions, files, heads))
1738
1737
1739 if changesets > 0:
1738 if changesets > 0:
1740 self.hook('pretxnchangegroup', throw=True,
1739 self.hook('pretxnchangegroup', throw=True,
1741 node=hex(self.changelog.node(cor+1)), source=srctype,
1740 node=hex(self.changelog.node(cor+1)), source=srctype,
1742 url=url)
1741 url=url)
1743
1742
1744 tr.close()
1743 tr.close()
1745
1744
1746 if changesets > 0:
1745 if changesets > 0:
1747 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1746 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1748 source=srctype, url=url)
1747 source=srctype, url=url)
1749
1748
1750 for i in range(cor + 1, cnr + 1):
1749 for i in range(cor + 1, cnr + 1):
1751 self.hook("incoming", node=hex(self.changelog.node(i)),
1750 self.hook("incoming", node=hex(self.changelog.node(i)),
1752 source=srctype, url=url)
1751 source=srctype, url=url)
1753
1752
1754 return newheads - oldheads + 1
1753 return newheads - oldheads + 1
1755
1754
1756
1755
1757 def stream_in(self, remote):
1756 def stream_in(self, remote):
1758 fp = remote.stream_out()
1757 fp = remote.stream_out()
1759 resp = int(fp.readline())
1758 resp = int(fp.readline())
1760 if resp != 0:
1759 if resp != 0:
1761 raise util.Abort(_('operation forbidden by server'))
1760 raise util.Abort(_('operation forbidden by server'))
1762 self.ui.status(_('streaming all changes\n'))
1761 self.ui.status(_('streaming all changes\n'))
1763 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1762 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1764 self.ui.status(_('%d files to transfer, %s of data\n') %
1763 self.ui.status(_('%d files to transfer, %s of data\n') %
1765 (total_files, util.bytecount(total_bytes)))
1764 (total_files, util.bytecount(total_bytes)))
1766 start = time.time()
1765 start = time.time()
1767 for i in xrange(total_files):
1766 for i in xrange(total_files):
1768 name, size = fp.readline().split('\0', 1)
1767 name, size = fp.readline().split('\0', 1)
1769 size = int(size)
1768 size = int(size)
1770 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1769 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1771 ofp = self.opener(name, 'w')
1770 ofp = self.opener(name, 'w')
1772 for chunk in util.filechunkiter(fp, limit=size):
1771 for chunk in util.filechunkiter(fp, limit=size):
1773 ofp.write(chunk)
1772 ofp.write(chunk)
1774 ofp.close()
1773 ofp.close()
1775 elapsed = time.time() - start
1774 elapsed = time.time() - start
1776 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1775 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1777 (util.bytecount(total_bytes), elapsed,
1776 (util.bytecount(total_bytes), elapsed,
1778 util.bytecount(total_bytes / elapsed)))
1777 util.bytecount(total_bytes / elapsed)))
1779 self.reload()
1778 self.reload()
1780 return len(self.heads()) + 1
1779 return len(self.heads()) + 1
1781
1780
1782 def clone(self, remote, heads=[], stream=False):
1781 def clone(self, remote, heads=[], stream=False):
1783 '''clone remote repository.
1782 '''clone remote repository.
1784
1783
1785 keyword arguments:
1784 keyword arguments:
1786 heads: list of revs to clone (forces use of pull)
1785 heads: list of revs to clone (forces use of pull)
1787 stream: use streaming clone if possible'''
1786 stream: use streaming clone if possible'''
1788
1787
1789 # now, all clients that can request uncompressed clones can
1788 # now, all clients that can request uncompressed clones can
1790 # read repo formats supported by all servers that can serve
1789 # read repo formats supported by all servers that can serve
1791 # them.
1790 # them.
1792
1791
1793 # if revlog format changes, client will have to check version
1792 # if revlog format changes, client will have to check version
1794 # and format flags on "stream" capability, and use
1793 # and format flags on "stream" capability, and use
1795 # uncompressed only if compatible.
1794 # uncompressed only if compatible.
1796
1795
1797 if stream and not heads and remote.capable('stream'):
1796 if stream and not heads and remote.capable('stream'):
1798 return self.stream_in(remote)
1797 return self.stream_in(remote)
1799 return self.pull(remote, heads)
1798 return self.pull(remote, heads)
1800
1799
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a zero-argument callback that renames the journal files
    under base to their undo names after a transaction completes."""
    def onclose():
        # journal first, then journal.dirstate, same as always
        for old, new in [("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")]:
            util.rename(os.path.join(base, old), os.path.join(base, new))
    return onclose
1809
1808
def instance(ui, path, create):
    """Open (or create, when create is true) the local repository
    addressed by path, stripping any leading file:// scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1812
1811
def islocal(path):
    """Repositories of this class always live on local disk."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now