##// END OF EJS Templates
commit: read branch with workingctx
Matt Mackall -
r3440:0f1fd985 default
parent child Browse files
Show More
@@ -1,1813 +1,1810 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    # server capabilities advertised to clients; a plain local
    # repository advertises none
    capabilities = ()

    def __del__(self):
        # drop the reference to any live transaction so it can be
        # collected along with the repository object
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or create, when create is true) the repository at path.

        If path is None, walk upward from the current directory until a
        directory containing ".hg" is found.  Raises repo.RepoError when
        no repository is found, or when create is requested but the
        repository already exists.
        """
        repo.repository.__init__(self)
        if not path:
            # search upward for a .hg directory
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes under .hg, wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # the per-repository hgrc is optional
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # determine the revlog format version and flags from config
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not loaded yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
90 def url(self):
90 def url(self):
91 return 'file:' + self.root
91 return 'file:' + self.root
92
92
    def hook(self, name, throw=False, **args):
        """Run every configured hook whose name matches *name*.

        A hook returning a true value means failure; when throw is true a
        failing hook raises util.Abort, otherwise only a warning is
        printed.  Returns the last hook's failure status.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            # funcname is "module.attr[.attr...]"; split off the module
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted attribute path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: keyword args are passed in the environment as
            # HG_* variables; a nonzero exit status means failure
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # both "name" and "name.suffix" entries match; run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
173
173
    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored outside version control in .hg/localtags
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to overwrite uncommitted .hgtags changes; status()[:5]
        # covers modified, added, removed, deleted and unknown files
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
216
216
217 def tags(self):
217 def tags(self):
218 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
219 if not self.tagscache:
219 if not self.tagscache:
220 self.tagscache = {}
220 self.tagscache = {}
221
221
222 def parsetag(line, context):
222 def parsetag(line, context):
223 if not line:
223 if not line:
224 return
224 return
225 s = l.split(" ", 1)
225 s = l.split(" ", 1)
226 if len(s) != 2:
226 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
228 return
229 node, key = s
229 node, key = s
230 key = key.strip()
230 key = key.strip()
231 try:
231 try:
232 bin_n = bin(node)
232 bin_n = bin(node)
233 except TypeError:
233 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
235 (context, node))
236 return
236 return
237 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
239 (context, key))
240 return
240 return
241 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
242
242
243 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
245 # taking precedence
245 # taking precedence
246 heads = self.heads()
246 heads = self.heads()
247 heads.reverse()
247 heads.reverse()
248 fl = self.file(".hgtags")
248 fl = self.file(".hgtags")
249 for node in heads:
249 for node in heads:
250 change = self.changelog.read(node)
250 change = self.changelog.read(node)
251 rev = self.changelog.rev(node)
251 rev = self.changelog.rev(node)
252 fn, ff = self.manifest.find(change[0], '.hgtags')
252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 if fn is None: continue
253 if fn is None: continue
254 count = 0
254 count = 0
255 for l in fl.read(fn).splitlines():
255 for l in fl.read(fn).splitlines():
256 count += 1
256 count += 1
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 (rev, short(node), count))
258 (rev, short(node), count))
259 try:
259 try:
260 f = self.opener("localtags")
260 f = self.opener("localtags")
261 count = 0
261 count = 0
262 for l in f:
262 for l in f:
263 count += 1
263 count += 1
264 parsetag(l, _("localtags, line %d") % count)
264 parsetag(l, _("localtags, line %d") % count)
265 except IOError:
265 except IOError:
266 pass
266 pass
267
267
268 self.tagscache['tip'] = self.changelog.tip()
268 self.tagscache['tip'] = self.changelog.tip()
269
269
270 return self.tagscache
270 return self.tagscache
271
271
272 def tagslist(self):
272 def tagslist(self):
273 '''return a list of tags ordered by revision'''
273 '''return a list of tags ordered by revision'''
274 l = []
274 l = []
275 for t, n in self.tags().items():
275 for t, n in self.tags().items():
276 try:
276 try:
277 r = self.changelog.rev(n)
277 r = self.changelog.rev(n)
278 except:
278 except:
279 r = -2 # sort to the beginning of the list if unknown
279 r = -2 # sort to the beginning of the list if unknown
280 l.append((r, t, n))
280 l.append((r, t, n))
281 l.sort()
281 l.sort()
282 return [(t, n) for r, t, n in l]
282 return [(t, n) for r, t, n in l]
283
283
284 def nodetags(self, node):
284 def nodetags(self, node):
285 '''return the tags associated with a node'''
285 '''return the tags associated with a node'''
286 if not self.nodetagscache:
286 if not self.nodetagscache:
287 self.nodetagscache = {}
287 self.nodetagscache = {}
288 for t, n in self.tags().items():
288 for t, n in self.tags().items():
289 self.nodetagscache.setdefault(n, []).append(t)
289 self.nodetagscache.setdefault(n, []).append(t)
290 return self.nodetagscache.get(node, [])
290 return self.nodetagscache.get(node, [])
291
291
    def branchtags(self):
        """return a {branch-label: tip-node} map, computed lazily.

        The map is seeded from the branches.cache file when that file is
        still consistent with the changelog, then updated by scanning
        any newer revisions, and rewritten to disk if it changed.
        """
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        try:
            # cache file: first line "<tip-hex> <tip-rev>", then one
            # "<node-hex> <label>" line per branch
            f = self.opener("branches.cache")
            last, lrev = f.readline().rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if self.changelog.node(lrev) == last: # sanity check
                for l in f:
                    node, label = l.rstrip().split(" ", 1)
                    self.branchcache[label] = bin(node)
            f.close()
        except IOError:
            # no cache file: start from scratch
            last, lrev = nullid, -1
            lrev = self.changelog.rev(last)

        # NOTE(review): when the sanity check fails, lrev keeps the
        # on-disk value and only revisions after it are rescanned even
        # though the cached labels were discarded — confirm intended
        tip = self.changelog.count() - 1
        if lrev != tip:
            for r in xrange(lrev + 1, tip + 1):
                c = self.changectx(r)
                b = c.branch()
                if b:
                    self.branchcache[b] = c.node()
            self._writebranchcache()

        return self.branchcache
321
321
322 def _writebranchcache(self):
322 def _writebranchcache(self):
323 f = self.opener("branches.cache", "w")
323 f = self.opener("branches.cache", "w")
324 t = self.changelog.tip()
324 t = self.changelog.tip()
325 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
325 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
326 for label, node in self.branchcache.iteritems():
326 for label, node in self.branchcache.iteritems():
327 f.write("%s %s\n" % (hex(node), label))
327 f.write("%s %s\n" % (hex(node), label))
328
328
329 def lookup(self, key):
329 def lookup(self, key):
330 if key == '.':
330 if key == '.':
331 key = self.dirstate.parents()[0]
331 key = self.dirstate.parents()[0]
332 if key == nullid:
332 if key == nullid:
333 raise repo.RepoError(_("no revision checked out"))
333 raise repo.RepoError(_("no revision checked out"))
334 if key in self.tags():
334 if key in self.tags():
335 return self.tags()[key]
335 return self.tags()[key]
336 if key in self.branchtags():
336 if key in self.branchtags():
337 return self.branchtags()[key]
337 return self.branchtags()[key]
338 try:
338 try:
339 return self.changelog.lookup(key)
339 return self.changelog.lookup(key)
340 except:
340 except:
341 raise repo.RepoError(_("unknown revision '%s'") % key)
341 raise repo.RepoError(_("unknown revision '%s'") % key)
342
342
343 def dev(self):
343 def dev(self):
344 return os.lstat(self.path).st_dev
344 return os.lstat(self.path).st_dev
345
345
    def local(self):
        # this repository is directly accessible on the local filesystem
        return True
348
348
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
351
351
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
354
354
355 def file(self, f):
355 def file(self, f):
356 if f[0] == '/':
356 if f[0] == '/':
357 f = f[1:]
357 f = f[1:]
358 return filelog.filelog(self.opener, f, self.revlogversion)
358 return filelog.filelog(self.opener, f, self.revlogversion)
359
359
    def changectx(self, changeid=None):
        # context object for the given changeset (default: tip-side default
        # chosen by context.changectx)
        return context.changectx(self, changeid)
362
362
    def workingctx(self):
        # context object for the working directory
        return context.workingctx(self)
365
365
366 def parents(self, changeid=None):
366 def parents(self, changeid=None):
367 '''
367 '''
368 get list of changectxs for parents of changeid or working directory
368 get list of changectxs for parents of changeid or working directory
369 '''
369 '''
370 if changeid is None:
370 if changeid is None:
371 pl = self.dirstate.parents()
371 pl = self.dirstate.parents()
372 else:
372 else:
373 n = self.changelog.lookup(changeid)
373 n = self.changelog.lookup(changeid)
374 pl = self.changelog.parents(n)
374 pl = self.changelog.parents(n)
375 if pl[1] == nullid:
375 if pl[1] == nullid:
376 return [self.changectx(pl[0])]
376 return [self.changectx(pl[0])]
377 return [self.changectx(pl[0]), self.changectx(pl[1])]
377 return [self.changectx(pl[0]), self.changectx(pl[1])]
378
378
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
383
383
    def getcwd(self):
        # current working directory as a repo-relative path (delegated
        # to the dirstate)
        return self.dirstate.getcwd()
386
386
    def wfile(self, f, mode='r'):
        # open file f relative to the working directory
        return self.wopener(f, mode)
389
389
390 def wread(self, filename):
390 def wread(self, filename):
391 if self.encodepats == None:
391 if self.encodepats == None:
392 l = []
392 l = []
393 for pat, cmd in self.ui.configitems("encode"):
393 for pat, cmd in self.ui.configitems("encode"):
394 mf = util.matcher(self.root, "", [pat], [], [])[1]
394 mf = util.matcher(self.root, "", [pat], [], [])[1]
395 l.append((mf, cmd))
395 l.append((mf, cmd))
396 self.encodepats = l
396 self.encodepats = l
397
397
398 data = self.wopener(filename, 'r').read()
398 data = self.wopener(filename, 'r').read()
399
399
400 for mf, cmd in self.encodepats:
400 for mf, cmd in self.encodepats:
401 if mf(filename):
401 if mf(filename):
402 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
402 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
403 data = util.filter(data, cmd)
403 data = util.filter(data, cmd)
404 break
404 break
405
405
406 return data
406 return data
407
407
408 def wwrite(self, filename, data, fd=None):
408 def wwrite(self, filename, data, fd=None):
409 if self.decodepats == None:
409 if self.decodepats == None:
410 l = []
410 l = []
411 for pat, cmd in self.ui.configitems("decode"):
411 for pat, cmd in self.ui.configitems("decode"):
412 mf = util.matcher(self.root, "", [pat], [], [])[1]
412 mf = util.matcher(self.root, "", [pat], [], [])[1]
413 l.append((mf, cmd))
413 l.append((mf, cmd))
414 self.decodepats = l
414 self.decodepats = l
415
415
416 for mf, cmd in self.decodepats:
416 for mf, cmd in self.decodepats:
417 if mf(filename):
417 if mf(filename):
418 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
418 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
419 data = util.filter(data, cmd)
419 data = util.filter(data, cmd)
420 break
420 break
421
421
422 if fd:
422 if fd:
423 return fd.write(data)
423 return fd.write(data)
424 return self.wopener(filename, 'w').write(data)
424 return self.wopener(filename, 'w').write(data)
425
425
    def transaction(self):
        """return a transaction, nesting into an already-running one.

        Before opening a fresh transaction the current dirstate is
        copied to journal.dirstate so rollback() can restore it.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # join the transaction already in progress
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
443
443
444 def recover(self):
444 def recover(self):
445 l = self.lock()
445 l = self.lock()
446 if os.path.exists(self.join("journal")):
446 if os.path.exists(self.join("journal")):
447 self.ui.status(_("rolling back interrupted transaction\n"))
447 self.ui.status(_("rolling back interrupted transaction\n"))
448 transaction.rollback(self.opener, self.join("journal"))
448 transaction.rollback(self.opener, self.join("journal"))
449 self.reload()
449 self.reload()
450 return True
450 return True
451 else:
451 else:
452 self.ui.warn(_("no interrupted transaction available\n"))
452 self.ui.warn(_("no interrupted transaction available\n"))
453 return False
453 return False
454
454
455 def rollback(self, wlock=None):
455 def rollback(self, wlock=None):
456 if not wlock:
456 if not wlock:
457 wlock = self.wlock()
457 wlock = self.wlock()
458 l = self.lock()
458 l = self.lock()
459 if os.path.exists(self.join("undo")):
459 if os.path.exists(self.join("undo")):
460 self.ui.status(_("rolling back last transaction\n"))
460 self.ui.status(_("rolling back last transaction\n"))
461 transaction.rollback(self.opener, self.join("undo"))
461 transaction.rollback(self.opener, self.join("undo"))
462 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
462 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
463 self.reload()
463 self.reload()
464 self.wreload()
464 self.wreload()
465 else:
465 else:
466 self.ui.warn(_("no rollback information available\n"))
466 self.ui.warn(_("no rollback information available\n"))
467
467
    def wreload(self):
        # re-read the dirstate from disk (e.g. after a rollback)
        self.dirstate.read()
470
470
471 def reload(self):
471 def reload(self):
472 self.changelog.load()
472 self.changelog.load()
473 self.manifest.load()
473 self.manifest.load()
474 self.tagscache = None
474 self.tagscache = None
475 self.nodetagscache = None
475 self.nodetagscache = None
476
476
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """acquire the named lock file.

        First tries a non-blocking acquire; if the lock is held and wait
        is true, retries with a timeout (ui.timeout config, default 600
        seconds).  acquirefn, when given, runs after the lock is taken;
        releasefn is passed through to the lock for release time.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
493
493
    def lock(self, wait=1):
        # store lock; reloads changelog/manifest once acquired
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
497
497
    def wlock(self, wait=1):
        # working-directory lock; writes the dirstate on release and
        # re-reads it on acquire
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
502
502
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node (or the existing parent node when
        the file is unchanged).  Files actually written are appended to
        changelist.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # file's parents come from its entries in the two manifests
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file is a copy/rename: record the source path and the
            # source revision in the filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
542
542
543 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
543 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
544 orig_parent = self.dirstate.parents()[0] or nullid
544 orig_parent = self.dirstate.parents()[0] or nullid
545 p1 = p1 or self.dirstate.parents()[0] or nullid
545 p1 = p1 or self.dirstate.parents()[0] or nullid
546 p2 = p2 or self.dirstate.parents()[1] or nullid
546 p2 = p2 or self.dirstate.parents()[1] or nullid
547 c1 = self.changelog.read(p1)
547 c1 = self.changelog.read(p1)
548 c2 = self.changelog.read(p2)
548 c2 = self.changelog.read(p2)
549 m1 = self.manifest.read(c1[0]).copy()
549 m1 = self.manifest.read(c1[0]).copy()
550 m2 = self.manifest.read(c2[0])
550 m2 = self.manifest.read(c2[0])
551 changed = []
551 changed = []
552 removed = []
552 removed = []
553
553
554 if orig_parent == p1:
554 if orig_parent == p1:
555 update_dirstate = 1
555 update_dirstate = 1
556 else:
556 else:
557 update_dirstate = 0
557 update_dirstate = 0
558
558
559 if not wlock:
559 if not wlock:
560 wlock = self.wlock()
560 wlock = self.wlock()
561 l = self.lock()
561 l = self.lock()
562 tr = self.transaction()
562 tr = self.transaction()
563 linkrev = self.changelog.count()
563 linkrev = self.changelog.count()
564 for f in files:
564 for f in files:
565 try:
565 try:
566 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
566 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
567 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
567 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
568 except IOError:
568 except IOError:
569 try:
569 try:
570 del m1[f]
570 del m1[f]
571 if update_dirstate:
571 if update_dirstate:
572 self.dirstate.forget([f])
572 self.dirstate.forget([f])
573 removed.append(f)
573 removed.append(f)
574 except:
574 except:
575 # deleted from p2?
575 # deleted from p2?
576 pass
576 pass
577
577
578 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
578 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
579 user = user or self.ui.username()
579 user = user or self.ui.username()
580 n = self.changelog.add(mnode, changed + removed, text,
580 n = self.changelog.add(mnode, changed + removed, text,
581 tr, p1, p2, user, date)
581 tr, p1, p2, user, date)
582 tr.close()
582 tr.close()
583 if update_dirstate:
583 if update_dirstate:
584 self.dirstate.setparents(n, nullid)
584 self.dirstate.setparents(n, nullid)
585
585
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit outstanding working-directory changes as a new changeset.

        files: explicit list of files to commit; when empty, commit
        everything reported changed by status(match).
        Returns the new changeset node, or None if nothing was committed
        (no changes, or the user supplied an empty commit message).
        """
        commit = []
        remove = []
        changed = []

        # build the commit/remove lists, either from the explicit file
        # list (using dirstate states) or from a full status walk
        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.status(match=match)[:5]
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        # a branch name change alone is enough to make the commit non-empty
        branchname = self.workingctx().branch()
        oldname = c1[5].get("branch", "")

        if not commit and not remove and not force and p2 == nullid and \
           branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build a template message and hand it to the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        extra = {}
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # move the dirstate to the new changeset
        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Generate (source, filename) pairs for files matching 'match'.

        With a node, yield ('m', fn) for matching files in that
        revision's manifest, plus ('b', fn) for requested-but-missing
        files accepted by badmatch; without a node, walk the working
        directory via the dirstate.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # found: drop it so leftovers can be reported below
                        # (safe: we break out of the iteration immediately)
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the revision
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def fcmp(fn, mf):
            # compare the working copy of fn against its manifest entry
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of 'node' restricted to files accepted by 'match'
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # couldn't lock: proceed read-only (skip dirstate fixups)
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean so future
                                # status calls can skip the full compare
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    # remove handled entries; whatever is left in mf1
                    # afterwards only exists in node1, i.e. was removed
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
820 def add(self, list, wlock=None):
817 def add(self, list, wlock=None):
821 if not wlock:
818 if not wlock:
822 wlock = self.wlock()
819 wlock = self.wlock()
823 for f in list:
820 for f in list:
824 p = self.wjoin(f)
821 p = self.wjoin(f)
825 if not os.path.exists(p):
822 if not os.path.exists(p):
826 self.ui.warn(_("%s does not exist!\n") % f)
823 self.ui.warn(_("%s does not exist!\n") % f)
827 elif not os.path.isfile(p):
824 elif not os.path.isfile(p):
828 self.ui.warn(_("%s not added: only files supported currently\n")
825 self.ui.warn(_("%s not added: only files supported currently\n")
829 % f)
826 % f)
830 elif self.dirstate.state(f) in 'an':
827 elif self.dirstate.state(f) in 'an':
831 self.ui.warn(_("%s already tracked!\n") % f)
828 self.ui.warn(_("%s already tracked!\n") % f)
832 else:
829 else:
833 self.dirstate.update([f], "a")
830 self.dirstate.update([f], "a")
834
831
835 def forget(self, list, wlock=None):
832 def forget(self, list, wlock=None):
836 if not wlock:
833 if not wlock:
837 wlock = self.wlock()
834 wlock = self.wlock()
838 for f in list:
835 for f in list:
839 if self.dirstate.state(f) not in 'ai':
836 if self.dirstate.state(f) not in 'ai':
840 self.ui.warn(_("%s not added!\n") % f)
837 self.ui.warn(_("%s not added!\n") % f)
841 else:
838 else:
842 self.dirstate.forget([f])
839 self.dirstate.forget([f])
843
840
844 def remove(self, list, unlink=False, wlock=None):
841 def remove(self, list, unlink=False, wlock=None):
845 if unlink:
842 if unlink:
846 for f in list:
843 for f in list:
847 try:
844 try:
848 util.unlink(self.wjoin(f))
845 util.unlink(self.wjoin(f))
849 except OSError, inst:
846 except OSError, inst:
850 if inst.errno != errno.ENOENT:
847 if inst.errno != errno.ENOENT:
851 raise
848 raise
852 if not wlock:
849 if not wlock:
853 wlock = self.wlock()
850 wlock = self.wlock()
854 for f in list:
851 for f in list:
855 p = self.wjoin(f)
852 p = self.wjoin(f)
856 if os.path.exists(p):
853 if os.path.exists(p):
857 self.ui.warn(_("%s still exists!\n") % f)
854 self.ui.warn(_("%s still exists!\n") % f)
858 elif self.dirstate.state(f) == 'a':
855 elif self.dirstate.state(f) == 'a':
859 self.dirstate.forget([f])
856 self.dirstate.forget([f])
860 elif f not in self.dirstate:
857 elif f not in self.dirstate:
861 self.ui.warn(_("%s not tracked!\n") % f)
858 self.ui.warn(_("%s not tracked!\n") % f)
862 else:
859 else:
863 self.dirstate.update([f], "r")
860 self.dirstate.update([f], "r")
864
861
865 def undelete(self, list, wlock=None):
862 def undelete(self, list, wlock=None):
866 p = self.dirstate.parents()[0]
863 p = self.dirstate.parents()[0]
867 mn = self.changelog.read(p)[0]
864 mn = self.changelog.read(p)[0]
868 m = self.manifest.read(mn)
865 m = self.manifest.read(mn)
869 if not wlock:
866 if not wlock:
870 wlock = self.wlock()
867 wlock = self.wlock()
871 for f in list:
868 for f in list:
872 if self.dirstate.state(f) not in "r":
869 if self.dirstate.state(f) not in "r":
873 self.ui.warn("%s not removed!\n" % f)
870 self.ui.warn("%s not removed!\n" % f)
874 else:
871 else:
875 t = self.file(f).read(m[f])
872 t = self.file(f).read(m[f])
876 self.wwrite(f, t)
873 self.wwrite(f, t)
877 util.set_exec(self.wjoin(f), m.execf(f))
874 util.set_exec(self.wjoin(f), m.execf(f))
878 self.dirstate.update([f], "n")
875 self.dirstate.update([f], "n")
879
876
880 def copy(self, source, dest, wlock=None):
877 def copy(self, source, dest, wlock=None):
881 p = self.wjoin(dest)
878 p = self.wjoin(dest)
882 if not os.path.exists(p):
879 if not os.path.exists(p):
883 self.ui.warn(_("%s does not exist!\n") % dest)
880 self.ui.warn(_("%s does not exist!\n") % dest)
884 elif not os.path.isfile(p):
881 elif not os.path.isfile(p):
885 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
882 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
886 else:
883 else:
887 if not wlock:
884 if not wlock:
888 wlock = self.wlock()
885 wlock = self.wlock()
889 if self.dirstate.state(dest) == '?':
886 if self.dirstate.state(dest) == '?':
890 self.dirstate.update([dest], "a")
887 self.dirstate.update([dest], "a")
891 self.dirstate.copy(source, dest)
888 self.dirstate.copy(source, dest)
892
889
893 def heads(self, start=None):
890 def heads(self, start=None):
894 heads = self.changelog.heads(start)
891 heads = self.changelog.heads(start)
895 # sort the output in rev descending order
892 # sort the output in rev descending order
896 heads = [(-self.changelog.rev(h), h) for h in heads]
893 heads = [(-self.changelog.rev(h), h) for h in heads]
897 heads.sort()
894 heads.sort()
898 return [n for (r, n) in heads]
895 return [n for (r, n) in heads]
899
896
900 # branchlookup returns a dict giving a list of branches for
897 # branchlookup returns a dict giving a list of branches for
901 # each head. A branch is defined as the tag of a node or
898 # each head. A branch is defined as the tag of a node or
902 # the branch of the node's parents. If a node has multiple
899 # the branch of the node's parents. If a node has multiple
903 # branch tags, tags are eliminated if they are visible from other
900 # branch tags, tags are eliminated if they are visible from other
904 # branch tags.
901 # branch tags.
905 #
902 #
906 # So, for this graph: a->b->c->d->e
903 # So, for this graph: a->b->c->d->e
907 # \ /
904 # \ /
908 # aa -----/
905 # aa -----/
909 # a has tag 2.6.12
906 # a has tag 2.6.12
910 # d has tag 2.6.13
907 # d has tag 2.6.13
911 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
908 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
912 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
909 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
913 # from the list.
910 # from the list.
914 #
911 #
915 # It is possible that more than one head will have the same branch tag.
912 # It is possible that more than one head will have the same branch tag.
916 # callers need to check the result for multiple heads under the same
913 # callers need to check the result for multiple heads under the same
917 # branch tag if that is a problem for them (ie checkout of a specific
914 # branch tag if that is a problem for them (ie checkout of a specific
918 # branch).
915 # branch).
919 #
916 #
920 # passing in a specific branch will limit the depth of the search
917 # passing in a specific branch will limit the depth of the search
921 # through the parents. It won't limit the branches returned in the
918 # through the parents. It won't limit the branches returned in the
922 # result though.
919 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags.

        See the comment block above for the full definition of a branch
        tag and the elimination rules.  'branch' limits the depth of the
        parent traversal but not the tags returned.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a postponed second-parent traversal, keeping
                # the tag set ('found') accumulated so far
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    # record the first non-'tip' tag as visible from every
                    # node collected so far, and from itself
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop going deeper
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # postpone the second parent of a merge for later
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of tag nodes reachable from 'node',
                # memoized in viscache across heads
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1006 def branches(self, nodes):
1003 def branches(self, nodes):
1007 if not nodes:
1004 if not nodes:
1008 nodes = [self.changelog.tip()]
1005 nodes = [self.changelog.tip()]
1009 b = []
1006 b = []
1010 for n in nodes:
1007 for n in nodes:
1011 t = n
1008 t = n
1012 while 1:
1009 while 1:
1013 p = self.changelog.parents(n)
1010 p = self.changelog.parents(n)
1014 if p[1] != nullid or p[0] == nullid:
1011 if p[1] != nullid or p[0] == nullid:
1015 b.append((t, n, p[0], p[1]))
1012 b.append((t, n, p[0], p[1]))
1016 break
1013 break
1017 n = p[0]
1014 n = p[0]
1018 return b
1015 return b
1019
1016
1020 def between(self, pairs):
1017 def between(self, pairs):
1021 r = []
1018 r = []
1022
1019
1023 for top, bottom in pairs:
1020 for top, bottom in pairs:
1024 n, l, i = top, [], 0
1021 n, l, i = top, [], 0
1025 f = 1
1022 f = 1
1026
1023
1027 while n != bottom:
1024 while n != bottom:
1028 p = self.changelog.parents(n)[0]
1025 p = self.changelog.parents(n)[0]
1029 if i == f:
1026 if i == f:
1030 l.append(n)
1027 l.append(n)
1031 f = f * 2
1028 f = f * 2
1032 n = p
1029 n = p
1033 i += 1
1030 i += 1
1034
1031
1035 r.append(l)
1032 r.append(l)
1036
1033
1037 return r
1034 return r
1038
1035
1039 def findincoming(self, remote, base=None, heads=None, force=False):
1036 def findincoming(self, remote, base=None, heads=None, force=False):
1040 """Return list of roots of the subsets of missing nodes from remote
1037 """Return list of roots of the subsets of missing nodes from remote
1041
1038
1042 If base dict is specified, assume that these nodes and their parents
1039 If base dict is specified, assume that these nodes and their parents
1043 exist on the remote side and that no child of a node of base exists
1040 exist on the remote side and that no child of a node of base exists
1044 in both remote and self.
1041 in both remote and self.
1045 Furthermore base will be updated to include the nodes that exists
1042 Furthermore base will be updated to include the nodes that exists
1046 in self and remote but no children exists in self and remote.
1043 in self and remote but no children exists in self and remote.
1047 If a list of heads is specified, return only nodes which are heads
1044 If a list of heads is specified, return only nodes which are heads
1048 or ancestors of these heads.
1045 or ancestors of these heads.
1049
1046
1050 All the ancestors of base are in self and in remote.
1047 All the ancestors of base are in self and in remote.
1051 All the descendants of the list returned are missing in self.
1048 All the descendants of the list returned are missing in self.
1052 (and so we know that the rest of the nodes are missing in remote, see
1049 (and so we know that the rest of the nodes are missing in remote, see
1053 outgoing)
1050 outgoing)
1054 """
1051 """
1055 m = self.changelog.nodemap
1052 m = self.changelog.nodemap
1056 search = []
1053 search = []
1057 fetch = {}
1054 fetch = {}
1058 seen = {}
1055 seen = {}
1059 seenbranch = {}
1056 seenbranch = {}
1060 if base == None:
1057 if base == None:
1061 base = {}
1058 base = {}
1062
1059
1063 if not heads:
1060 if not heads:
1064 heads = remote.heads()
1061 heads = remote.heads()
1065
1062
1066 if self.changelog.tip() == nullid:
1063 if self.changelog.tip() == nullid:
1067 base[nullid] = 1
1064 base[nullid] = 1
1068 if heads != [nullid]:
1065 if heads != [nullid]:
1069 return [nullid]
1066 return [nullid]
1070 return []
1067 return []
1071
1068
1072 # assume we're closer to the tip than the root
1069 # assume we're closer to the tip than the root
1073 # and start by examining the heads
1070 # and start by examining the heads
1074 self.ui.status(_("searching for changes\n"))
1071 self.ui.status(_("searching for changes\n"))
1075
1072
1076 unknown = []
1073 unknown = []
1077 for h in heads:
1074 for h in heads:
1078 if h not in m:
1075 if h not in m:
1079 unknown.append(h)
1076 unknown.append(h)
1080 else:
1077 else:
1081 base[h] = 1
1078 base[h] = 1
1082
1079
1083 if not unknown:
1080 if not unknown:
1084 return []
1081 return []
1085
1082
1086 req = dict.fromkeys(unknown)
1083 req = dict.fromkeys(unknown)
1087 reqcnt = 0
1084 reqcnt = 0
1088
1085
1089 # search through remote branches
1086 # search through remote branches
1090 # a 'branch' here is a linear segment of history, with four parts:
1087 # a 'branch' here is a linear segment of history, with four parts:
1091 # head, root, first parent, second parent
1088 # head, root, first parent, second parent
1092 # (a branch always has two parents (or none) by definition)
1089 # (a branch always has two parents (or none) by definition)
1093 unknown = remote.branches(unknown)
1090 unknown = remote.branches(unknown)
1094 while unknown:
1091 while unknown:
1095 r = []
1092 r = []
1096 while unknown:
1093 while unknown:
1097 n = unknown.pop(0)
1094 n = unknown.pop(0)
1098 if n[0] in seen:
1095 if n[0] in seen:
1099 continue
1096 continue
1100
1097
1101 self.ui.debug(_("examining %s:%s\n")
1098 self.ui.debug(_("examining %s:%s\n")
1102 % (short(n[0]), short(n[1])))
1099 % (short(n[0]), short(n[1])))
1103 if n[0] == nullid: # found the end of the branch
1100 if n[0] == nullid: # found the end of the branch
1104 pass
1101 pass
1105 elif n in seenbranch:
1102 elif n in seenbranch:
1106 self.ui.debug(_("branch already found\n"))
1103 self.ui.debug(_("branch already found\n"))
1107 continue
1104 continue
1108 elif n[1] and n[1] in m: # do we know the base?
1105 elif n[1] and n[1] in m: # do we know the base?
1109 self.ui.debug(_("found incomplete branch %s:%s\n")
1106 self.ui.debug(_("found incomplete branch %s:%s\n")
1110 % (short(n[0]), short(n[1])))
1107 % (short(n[0]), short(n[1])))
1111 search.append(n) # schedule branch range for scanning
1108 search.append(n) # schedule branch range for scanning
1112 seenbranch[n] = 1
1109 seenbranch[n] = 1
1113 else:
1110 else:
1114 if n[1] not in seen and n[1] not in fetch:
1111 if n[1] not in seen and n[1] not in fetch:
1115 if n[2] in m and n[3] in m:
1112 if n[2] in m and n[3] in m:
1116 self.ui.debug(_("found new changeset %s\n") %
1113 self.ui.debug(_("found new changeset %s\n") %
1117 short(n[1]))
1114 short(n[1]))
1118 fetch[n[1]] = 1 # earliest unknown
1115 fetch[n[1]] = 1 # earliest unknown
1119 for p in n[2:4]:
1116 for p in n[2:4]:
1120 if p in m:
1117 if p in m:
1121 base[p] = 1 # latest known
1118 base[p] = 1 # latest known
1122
1119
1123 for p in n[2:4]:
1120 for p in n[2:4]:
1124 if p not in req and p not in m:
1121 if p not in req and p not in m:
1125 r.append(p)
1122 r.append(p)
1126 req[p] = 1
1123 req[p] = 1
1127 seen[n[0]] = 1
1124 seen[n[0]] = 1
1128
1125
1129 if r:
1126 if r:
1130 reqcnt += 1
1127 reqcnt += 1
1131 self.ui.debug(_("request %d: %s\n") %
1128 self.ui.debug(_("request %d: %s\n") %
1132 (reqcnt, " ".join(map(short, r))))
1129 (reqcnt, " ".join(map(short, r))))
1133 for p in range(0, len(r), 10):
1130 for p in range(0, len(r), 10):
1134 for b in remote.branches(r[p:p+10]):
1131 for b in remote.branches(r[p:p+10]):
1135 self.ui.debug(_("received %s:%s\n") %
1132 self.ui.debug(_("received %s:%s\n") %
1136 (short(b[0]), short(b[1])))
1133 (short(b[0]), short(b[1])))
1137 unknown.append(b)
1134 unknown.append(b)
1138
1135
1139 # do binary search on the branches we found
1136 # do binary search on the branches we found
1140 while search:
1137 while search:
1141 n = search.pop(0)
1138 n = search.pop(0)
1142 reqcnt += 1
1139 reqcnt += 1
1143 l = remote.between([(n[0], n[1])])[0]
1140 l = remote.between([(n[0], n[1])])[0]
1144 l.append(n[1])
1141 l.append(n[1])
1145 p = n[0]
1142 p = n[0]
1146 f = 1
1143 f = 1
1147 for i in l:
1144 for i in l:
1148 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1145 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1149 if i in m:
1146 if i in m:
1150 if f <= 2:
1147 if f <= 2:
1151 self.ui.debug(_("found new branch changeset %s\n") %
1148 self.ui.debug(_("found new branch changeset %s\n") %
1152 short(p))
1149 short(p))
1153 fetch[p] = 1
1150 fetch[p] = 1
1154 base[i] = 1
1151 base[i] = 1
1155 else:
1152 else:
1156 self.ui.debug(_("narrowed branch search to %s:%s\n")
1153 self.ui.debug(_("narrowed branch search to %s:%s\n")
1157 % (short(p), short(i)))
1154 % (short(p), short(i)))
1158 search.append((p, i))
1155 search.append((p, i))
1159 break
1156 break
1160 p, f = i, f * 2
1157 p, f = i, f * 2
1161
1158
1162 # sanity check our fetch list
1159 # sanity check our fetch list
1163 for f in fetch.keys():
1160 for f in fetch.keys():
1164 if f in m:
1161 if f in m:
1165 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1162 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1166
1163
1167 if base.keys() == [nullid]:
1164 if base.keys() == [nullid]:
1168 if force:
1165 if force:
1169 self.ui.warn(_("warning: repository is unrelated\n"))
1166 self.ui.warn(_("warning: repository is unrelated\n"))
1170 else:
1167 else:
1171 raise util.Abort(_("repository is unrelated"))
1168 raise util.Abort(_("repository is unrelated"))
1172
1169
1173 self.ui.debug(_("found new changesets starting at ") +
1170 self.ui.debug(_("found new changesets starting at ") +
1174 " ".join([short(f) for f in fetch]) + "\n")
1171 " ".join([short(f) for f in fetch]) + "\n")
1175
1172
1176 self.ui.debug(_("%d total queries\n") % reqcnt)
1173 self.ui.debug(_("%d total queries\n") % reqcnt)
1177
1174
1178 return fetch.keys()
1175 return fetch.keys()
1179
1176
1180 def findoutgoing(self, remote, base=None, heads=None, force=False):
1177 def findoutgoing(self, remote, base=None, heads=None, force=False):
1181 """Return list of nodes that are roots of subsets not in remote
1178 """Return list of nodes that are roots of subsets not in remote
1182
1179
1183 If base dict is specified, assume that these nodes and their parents
1180 If base dict is specified, assume that these nodes and their parents
1184 exist on the remote side.
1181 exist on the remote side.
1185 If a list of heads is specified, return only nodes which are heads
1182 If a list of heads is specified, return only nodes which are heads
1186 or ancestors of these heads, and return a second element which
1183 or ancestors of these heads, and return a second element which
1187 contains all remote heads which get new children.
1184 contains all remote heads which get new children.
1188 """
1185 """
1189 if base == None:
1186 if base == None:
1190 base = {}
1187 base = {}
1191 self.findincoming(remote, base, heads, force=force)
1188 self.findincoming(remote, base, heads, force=force)
1192
1189
1193 self.ui.debug(_("common changesets up to ")
1190 self.ui.debug(_("common changesets up to ")
1194 + " ".join(map(short, base.keys())) + "\n")
1191 + " ".join(map(short, base.keys())) + "\n")
1195
1192
1196 remain = dict.fromkeys(self.changelog.nodemap)
1193 remain = dict.fromkeys(self.changelog.nodemap)
1197
1194
1198 # prune everything remote has from the tree
1195 # prune everything remote has from the tree
1199 del remain[nullid]
1196 del remain[nullid]
1200 remove = base.keys()
1197 remove = base.keys()
1201 while remove:
1198 while remove:
1202 n = remove.pop(0)
1199 n = remove.pop(0)
1203 if n in remain:
1200 if n in remain:
1204 del remain[n]
1201 del remain[n]
1205 for p in self.changelog.parents(n):
1202 for p in self.changelog.parents(n):
1206 remove.append(p)
1203 remove.append(p)
1207
1204
1208 # find every node whose parents have been pruned
1205 # find every node whose parents have been pruned
1209 subset = []
1206 subset = []
1210 # find every remote head that will get new children
1207 # find every remote head that will get new children
1211 updated_heads = {}
1208 updated_heads = {}
1212 for n in remain:
1209 for n in remain:
1213 p1, p2 = self.changelog.parents(n)
1210 p1, p2 = self.changelog.parents(n)
1214 if p1 not in remain and p2 not in remain:
1211 if p1 not in remain and p2 not in remain:
1215 subset.append(n)
1212 subset.append(n)
1216 if heads:
1213 if heads:
1217 if p1 in heads:
1214 if p1 in heads:
1218 updated_heads[p1] = True
1215 updated_heads[p1] = True
1219 if p2 in heads:
1216 if p2 in heads:
1220 updated_heads[p2] = True
1217 updated_heads[p2] = True
1221
1218
1222 # this is the set of all roots we have to push
1219 # this is the set of all roots we have to push
1223 if heads:
1220 if heads:
1224 return subset, updated_heads.keys()
1221 return subset, updated_heads.keys()
1225 else:
1222 else:
1226 return subset
1223 return subset
1227
1224
1228 def pull(self, remote, heads=None, force=False, lock=None):
1225 def pull(self, remote, heads=None, force=False, lock=None):
1229 mylock = False
1226 mylock = False
1230 if not lock:
1227 if not lock:
1231 lock = self.lock()
1228 lock = self.lock()
1232 mylock = True
1229 mylock = True
1233
1230
1234 try:
1231 try:
1235 fetch = self.findincoming(remote, force=force)
1232 fetch = self.findincoming(remote, force=force)
1236 if fetch == [nullid]:
1233 if fetch == [nullid]:
1237 self.ui.status(_("requesting all changes\n"))
1234 self.ui.status(_("requesting all changes\n"))
1238
1235
1239 if not fetch:
1236 if not fetch:
1240 self.ui.status(_("no changes found\n"))
1237 self.ui.status(_("no changes found\n"))
1241 return 0
1238 return 0
1242
1239
1243 if heads is None:
1240 if heads is None:
1244 cg = remote.changegroup(fetch, 'pull')
1241 cg = remote.changegroup(fetch, 'pull')
1245 else:
1242 else:
1246 cg = remote.changegroupsubset(fetch, heads, 'pull')
1243 cg = remote.changegroupsubset(fetch, heads, 'pull')
1247 return self.addchangegroup(cg, 'pull', remote.url())
1244 return self.addchangegroup(cg, 'pull', remote.url())
1248 finally:
1245 finally:
1249 if mylock:
1246 if mylock:
1250 lock.release()
1247 lock.release()
1251
1248
1252 def push(self, remote, force=False, revs=None):
1249 def push(self, remote, force=False, revs=None):
1253 # there are two ways to push to remote repo:
1250 # there are two ways to push to remote repo:
1254 #
1251 #
1255 # addchangegroup assumes local user can lock remote
1252 # addchangegroup assumes local user can lock remote
1256 # repo (local filesystem, old ssh servers).
1253 # repo (local filesystem, old ssh servers).
1257 #
1254 #
1258 # unbundle assumes local user cannot lock remote repo (new ssh
1255 # unbundle assumes local user cannot lock remote repo (new ssh
1259 # servers, http servers).
1256 # servers, http servers).
1260
1257
1261 if remote.capable('unbundle'):
1258 if remote.capable('unbundle'):
1262 return self.push_unbundle(remote, force, revs)
1259 return self.push_unbundle(remote, force, revs)
1263 return self.push_addchangegroup(remote, force, revs)
1260 return self.push_addchangegroup(remote, force, revs)
1264
1261
1265 def prepush(self, remote, force, revs):
1262 def prepush(self, remote, force, revs):
1266 base = {}
1263 base = {}
1267 remote_heads = remote.heads()
1264 remote_heads = remote.heads()
1268 inc = self.findincoming(remote, base, remote_heads, force=force)
1265 inc = self.findincoming(remote, base, remote_heads, force=force)
1269 if not force and inc:
1266 if not force and inc:
1270 self.ui.warn(_("abort: unsynced remote changes!\n"))
1267 self.ui.warn(_("abort: unsynced remote changes!\n"))
1271 self.ui.status(_("(did you forget to sync?"
1268 self.ui.status(_("(did you forget to sync?"
1272 " use push -f to force)\n"))
1269 " use push -f to force)\n"))
1273 return None, 1
1270 return None, 1
1274
1271
1275 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1272 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1276 if revs is not None:
1273 if revs is not None:
1277 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1274 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1278 else:
1275 else:
1279 bases, heads = update, self.changelog.heads()
1276 bases, heads = update, self.changelog.heads()
1280
1277
1281 if not bases:
1278 if not bases:
1282 self.ui.status(_("no changes found\n"))
1279 self.ui.status(_("no changes found\n"))
1283 return None, 1
1280 return None, 1
1284 elif not force:
1281 elif not force:
1285 # FIXME we don't properly detect creation of new heads
1282 # FIXME we don't properly detect creation of new heads
1286 # in the push -r case, assume the user knows what he's doing
1283 # in the push -r case, assume the user knows what he's doing
1287 if not revs and len(remote_heads) < len(heads) \
1284 if not revs and len(remote_heads) < len(heads) \
1288 and remote_heads != [nullid]:
1285 and remote_heads != [nullid]:
1289 self.ui.warn(_("abort: push creates new remote branches!\n"))
1286 self.ui.warn(_("abort: push creates new remote branches!\n"))
1290 self.ui.status(_("(did you forget to merge?"
1287 self.ui.status(_("(did you forget to merge?"
1291 " use push -f to force)\n"))
1288 " use push -f to force)\n"))
1292 return None, 1
1289 return None, 1
1293
1290
1294 if revs is None:
1291 if revs is None:
1295 cg = self.changegroup(update, 'push')
1292 cg = self.changegroup(update, 'push')
1296 else:
1293 else:
1297 cg = self.changegroupsubset(update, revs, 'push')
1294 cg = self.changegroupsubset(update, revs, 'push')
1298 return cg, remote_heads
1295 return cg, remote_heads
1299
1296
1300 def push_addchangegroup(self, remote, force, revs):
1297 def push_addchangegroup(self, remote, force, revs):
1301 lock = remote.lock()
1298 lock = remote.lock()
1302
1299
1303 ret = self.prepush(remote, force, revs)
1300 ret = self.prepush(remote, force, revs)
1304 if ret[0] is not None:
1301 if ret[0] is not None:
1305 cg, remote_heads = ret
1302 cg, remote_heads = ret
1306 return remote.addchangegroup(cg, 'push', self.url())
1303 return remote.addchangegroup(cg, 'push', self.url())
1307 return ret[1]
1304 return ret[1]
1308
1305
1309 def push_unbundle(self, remote, force, revs):
1306 def push_unbundle(self, remote, force, revs):
1310 # local repo finds heads on server, finds out what revs it
1307 # local repo finds heads on server, finds out what revs it
1311 # must push. once revs transferred, if server finds it has
1308 # must push. once revs transferred, if server finds it has
1312 # different heads (someone else won commit/push race), server
1309 # different heads (someone else won commit/push race), server
1313 # aborts.
1310 # aborts.
1314
1311
1315 ret = self.prepush(remote, force, revs)
1312 ret = self.prepush(remote, force, revs)
1316 if ret[0] is not None:
1313 if ret[0] is not None:
1317 cg, remote_heads = ret
1314 cg, remote_heads = ret
1318 if force: remote_heads = ['force']
1315 if force: remote_heads = ['force']
1319 return remote.unbundle(cg, remote_heads, 'push')
1316 return remote.unbundle(cg, remote_heads, 'push')
1320 return ret[1]
1317 return ret[1]
1321
1318
1322 def changegroupsubset(self, bases, heads, source):
1319 def changegroupsubset(self, bases, heads, source):
1323 """This function generates a changegroup consisting of all the nodes
1320 """This function generates a changegroup consisting of all the nodes
1324 that are descendents of any of the bases, and ancestors of any of
1321 that are descendents of any of the bases, and ancestors of any of
1325 the heads.
1322 the heads.
1326
1323
1327 It is fairly complex as determining which filenodes and which
1324 It is fairly complex as determining which filenodes and which
1328 manifest nodes need to be included for the changeset to be complete
1325 manifest nodes need to be included for the changeset to be complete
1329 is non-trivial.
1326 is non-trivial.
1330
1327
1331 Another wrinkle is doing the reverse, figuring out which changeset in
1328 Another wrinkle is doing the reverse, figuring out which changeset in
1332 the changegroup a particular filenode or manifestnode belongs to."""
1329 the changegroup a particular filenode or manifestnode belongs to."""
1333
1330
1334 self.hook('preoutgoing', throw=True, source=source)
1331 self.hook('preoutgoing', throw=True, source=source)
1335
1332
1336 # Set up some initial variables
1333 # Set up some initial variables
1337 # Make it easy to refer to self.changelog
1334 # Make it easy to refer to self.changelog
1338 cl = self.changelog
1335 cl = self.changelog
1339 # msng is short for missing - compute the list of changesets in this
1336 # msng is short for missing - compute the list of changesets in this
1340 # changegroup.
1337 # changegroup.
1341 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1338 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1342 # Some bases may turn out to be superfluous, and some heads may be
1339 # Some bases may turn out to be superfluous, and some heads may be
1343 # too. nodesbetween will return the minimal set of bases and heads
1340 # too. nodesbetween will return the minimal set of bases and heads
1344 # necessary to re-create the changegroup.
1341 # necessary to re-create the changegroup.
1345
1342
1346 # Known heads are the list of heads that it is assumed the recipient
1343 # Known heads are the list of heads that it is assumed the recipient
1347 # of this changegroup will know about.
1344 # of this changegroup will know about.
1348 knownheads = {}
1345 knownheads = {}
1349 # We assume that all parents of bases are known heads.
1346 # We assume that all parents of bases are known heads.
1350 for n in bases:
1347 for n in bases:
1351 for p in cl.parents(n):
1348 for p in cl.parents(n):
1352 if p != nullid:
1349 if p != nullid:
1353 knownheads[p] = 1
1350 knownheads[p] = 1
1354 knownheads = knownheads.keys()
1351 knownheads = knownheads.keys()
1355 if knownheads:
1352 if knownheads:
1356 # Now that we know what heads are known, we can compute which
1353 # Now that we know what heads are known, we can compute which
1357 # changesets are known. The recipient must know about all
1354 # changesets are known. The recipient must know about all
1358 # changesets required to reach the known heads from the null
1355 # changesets required to reach the known heads from the null
1359 # changeset.
1356 # changeset.
1360 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1357 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1361 junk = None
1358 junk = None
1362 # Transform the list into an ersatz set.
1359 # Transform the list into an ersatz set.
1363 has_cl_set = dict.fromkeys(has_cl_set)
1360 has_cl_set = dict.fromkeys(has_cl_set)
1364 else:
1361 else:
1365 # If there were no known heads, the recipient cannot be assumed to
1362 # If there were no known heads, the recipient cannot be assumed to
1366 # know about any changesets.
1363 # know about any changesets.
1367 has_cl_set = {}
1364 has_cl_set = {}
1368
1365
1369 # Make it easy to refer to self.manifest
1366 # Make it easy to refer to self.manifest
1370 mnfst = self.manifest
1367 mnfst = self.manifest
1371 # We don't know which manifests are missing yet
1368 # We don't know which manifests are missing yet
1372 msng_mnfst_set = {}
1369 msng_mnfst_set = {}
1373 # Nor do we know which filenodes are missing.
1370 # Nor do we know which filenodes are missing.
1374 msng_filenode_set = {}
1371 msng_filenode_set = {}
1375
1372
1376 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1373 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1377 junk = None
1374 junk = None
1378
1375
1379 # A changeset always belongs to itself, so the changenode lookup
1376 # A changeset always belongs to itself, so the changenode lookup
1380 # function for a changenode is identity.
1377 # function for a changenode is identity.
1381 def identity(x):
1378 def identity(x):
1382 return x
1379 return x
1383
1380
1384 # A function generating function. Sets up an environment for the
1381 # A function generating function. Sets up an environment for the
1385 # inner function.
1382 # inner function.
1386 def cmp_by_rev_func(revlog):
1383 def cmp_by_rev_func(revlog):
1387 # Compare two nodes by their revision number in the environment's
1384 # Compare two nodes by their revision number in the environment's
1388 # revision history. Since the revision number both represents the
1385 # revision history. Since the revision number both represents the
1389 # most efficient order to read the nodes in, and represents a
1386 # most efficient order to read the nodes in, and represents a
1390 # topological sorting of the nodes, this function is often useful.
1387 # topological sorting of the nodes, this function is often useful.
1391 def cmp_by_rev(a, b):
1388 def cmp_by_rev(a, b):
1392 return cmp(revlog.rev(a), revlog.rev(b))
1389 return cmp(revlog.rev(a), revlog.rev(b))
1393 return cmp_by_rev
1390 return cmp_by_rev
1394
1391
1395 # If we determine that a particular file or manifest node must be a
1392 # If we determine that a particular file or manifest node must be a
1396 # node that the recipient of the changegroup will already have, we can
1393 # node that the recipient of the changegroup will already have, we can
1397 # also assume the recipient will have all the parents. This function
1394 # also assume the recipient will have all the parents. This function
1398 # prunes them from the set of missing nodes.
1395 # prunes them from the set of missing nodes.
1399 def prune_parents(revlog, hasset, msngset):
1396 def prune_parents(revlog, hasset, msngset):
1400 haslst = hasset.keys()
1397 haslst = hasset.keys()
1401 haslst.sort(cmp_by_rev_func(revlog))
1398 haslst.sort(cmp_by_rev_func(revlog))
1402 for node in haslst:
1399 for node in haslst:
1403 parentlst = [p for p in revlog.parents(node) if p != nullid]
1400 parentlst = [p for p in revlog.parents(node) if p != nullid]
1404 while parentlst:
1401 while parentlst:
1405 n = parentlst.pop()
1402 n = parentlst.pop()
1406 if n not in hasset:
1403 if n not in hasset:
1407 hasset[n] = 1
1404 hasset[n] = 1
1408 p = [p for p in revlog.parents(n) if p != nullid]
1405 p = [p for p in revlog.parents(n) if p != nullid]
1409 parentlst.extend(p)
1406 parentlst.extend(p)
1410 for n in hasset:
1407 for n in hasset:
1411 msngset.pop(n, None)
1408 msngset.pop(n, None)
1412
1409
1413 # This is a function generating function used to set up an environment
1410 # This is a function generating function used to set up an environment
1414 # for the inner function to execute in.
1411 # for the inner function to execute in.
1415 def manifest_and_file_collector(changedfileset):
1412 def manifest_and_file_collector(changedfileset):
1416 # This is an information gathering function that gathers
1413 # This is an information gathering function that gathers
1417 # information from each changeset node that goes out as part of
1414 # information from each changeset node that goes out as part of
1418 # the changegroup. The information gathered is a list of which
1415 # the changegroup. The information gathered is a list of which
1419 # manifest nodes are potentially required (the recipient may
1416 # manifest nodes are potentially required (the recipient may
1420 # already have them) and total list of all files which were
1417 # already have them) and total list of all files which were
1421 # changed in any changeset in the changegroup.
1418 # changed in any changeset in the changegroup.
1422 #
1419 #
1423 # We also remember the first changenode we saw any manifest
1420 # We also remember the first changenode we saw any manifest
1424 # referenced by so we can later determine which changenode 'owns'
1421 # referenced by so we can later determine which changenode 'owns'
1425 # the manifest.
1422 # the manifest.
1426 def collect_manifests_and_files(clnode):
1423 def collect_manifests_and_files(clnode):
1427 c = cl.read(clnode)
1424 c = cl.read(clnode)
1428 for f in c[3]:
1425 for f in c[3]:
1429 # This is to make sure we only have one instance of each
1426 # This is to make sure we only have one instance of each
1430 # filename string for each filename.
1427 # filename string for each filename.
1431 changedfileset.setdefault(f, f)
1428 changedfileset.setdefault(f, f)
1432 msng_mnfst_set.setdefault(c[0], clnode)
1429 msng_mnfst_set.setdefault(c[0], clnode)
1433 return collect_manifests_and_files
1430 return collect_manifests_and_files
1434
1431
1435 # Figure out which manifest nodes (of the ones we think might be part
1432 # Figure out which manifest nodes (of the ones we think might be part
1436 # of the changegroup) the recipient must know about and remove them
1433 # of the changegroup) the recipient must know about and remove them
1437 # from the changegroup.
1434 # from the changegroup.
1438 def prune_manifests():
1435 def prune_manifests():
1439 has_mnfst_set = {}
1436 has_mnfst_set = {}
1440 for n in msng_mnfst_set:
1437 for n in msng_mnfst_set:
1441 # If a 'missing' manifest thinks it belongs to a changenode
1438 # If a 'missing' manifest thinks it belongs to a changenode
1442 # the recipient is assumed to have, obviously the recipient
1439 # the recipient is assumed to have, obviously the recipient
1443 # must have that manifest.
1440 # must have that manifest.
1444 linknode = cl.node(mnfst.linkrev(n))
1441 linknode = cl.node(mnfst.linkrev(n))
1445 if linknode in has_cl_set:
1442 if linknode in has_cl_set:
1446 has_mnfst_set[n] = 1
1443 has_mnfst_set[n] = 1
1447 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1444 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1448
1445
1449 # Use the information collected in collect_manifests_and_files to say
1446 # Use the information collected in collect_manifests_and_files to say
1450 # which changenode any manifestnode belongs to.
1447 # which changenode any manifestnode belongs to.
1451 def lookup_manifest_link(mnfstnode):
1448 def lookup_manifest_link(mnfstnode):
1452 return msng_mnfst_set[mnfstnode]
1449 return msng_mnfst_set[mnfstnode]
1453
1450
1454 # A function generating function that sets up the initial environment
1451 # A function generating function that sets up the initial environment
1455 # the inner function.
1452 # the inner function.
1456 def filenode_collector(changedfiles):
1453 def filenode_collector(changedfiles):
1457 next_rev = [0]
1454 next_rev = [0]
1458 # This gathers information from each manifestnode included in the
1455 # This gathers information from each manifestnode included in the
1459 # changegroup about which filenodes the manifest node references
1456 # changegroup about which filenodes the manifest node references
1460 # so we can include those in the changegroup too.
1457 # so we can include those in the changegroup too.
1461 #
1458 #
1462 # It also remembers which changenode each filenode belongs to. It
1459 # It also remembers which changenode each filenode belongs to. It
1463 # does this by assuming the a filenode belongs to the changenode
1460 # does this by assuming the a filenode belongs to the changenode
1464 # the first manifest that references it belongs to.
1461 # the first manifest that references it belongs to.
1465 def collect_msng_filenodes(mnfstnode):
1462 def collect_msng_filenodes(mnfstnode):
1466 r = mnfst.rev(mnfstnode)
1463 r = mnfst.rev(mnfstnode)
1467 if r == next_rev[0]:
1464 if r == next_rev[0]:
1468 # If the last rev we looked at was the one just previous,
1465 # If the last rev we looked at was the one just previous,
1469 # we only need to see a diff.
1466 # we only need to see a diff.
1470 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1467 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1471 # For each line in the delta
1468 # For each line in the delta
1472 for dline in delta.splitlines():
1469 for dline in delta.splitlines():
1473 # get the filename and filenode for that line
1470 # get the filename and filenode for that line
1474 f, fnode = dline.split('\0')
1471 f, fnode = dline.split('\0')
1475 fnode = bin(fnode[:40])
1472 fnode = bin(fnode[:40])
1476 f = changedfiles.get(f, None)
1473 f = changedfiles.get(f, None)
1477 # And if the file is in the list of files we care
1474 # And if the file is in the list of files we care
1478 # about.
1475 # about.
1479 if f is not None:
1476 if f is not None:
1480 # Get the changenode this manifest belongs to
1477 # Get the changenode this manifest belongs to
1481 clnode = msng_mnfst_set[mnfstnode]
1478 clnode = msng_mnfst_set[mnfstnode]
1482 # Create the set of filenodes for the file if
1479 # Create the set of filenodes for the file if
1483 # there isn't one already.
1480 # there isn't one already.
1484 ndset = msng_filenode_set.setdefault(f, {})
1481 ndset = msng_filenode_set.setdefault(f, {})
1485 # And set the filenode's changelog node to the
1482 # And set the filenode's changelog node to the
1486 # manifest's if it hasn't been set already.
1483 # manifest's if it hasn't been set already.
1487 ndset.setdefault(fnode, clnode)
1484 ndset.setdefault(fnode, clnode)
1488 else:
1485 else:
1489 # Otherwise we need a full manifest.
1486 # Otherwise we need a full manifest.
1490 m = mnfst.read(mnfstnode)
1487 m = mnfst.read(mnfstnode)
1491 # For every file in we care about.
1488 # For every file in we care about.
1492 for f in changedfiles:
1489 for f in changedfiles:
1493 fnode = m.get(f, None)
1490 fnode = m.get(f, None)
1494 # If it's in the manifest
1491 # If it's in the manifest
1495 if fnode is not None:
1492 if fnode is not None:
1496 # See comments above.
1493 # See comments above.
1497 clnode = msng_mnfst_set[mnfstnode]
1494 clnode = msng_mnfst_set[mnfstnode]
1498 ndset = msng_filenode_set.setdefault(f, {})
1495 ndset = msng_filenode_set.setdefault(f, {})
1499 ndset.setdefault(fnode, clnode)
1496 ndset.setdefault(fnode, clnode)
1500 # Remember the revision we hope to see next.
1497 # Remember the revision we hope to see next.
1501 next_rev[0] = r + 1
1498 next_rev[0] = r + 1
1502 return collect_msng_filenodes
1499 return collect_msng_filenodes
1503
1500
1504 # We have a list of filenodes we think we need for a file, lets remove
1501 # We have a list of filenodes we think we need for a file, lets remove
1505 # all those we now the recipient must have.
1502 # all those we now the recipient must have.
1506 def prune_filenodes(f, filerevlog):
1503 def prune_filenodes(f, filerevlog):
1507 msngset = msng_filenode_set[f]
1504 msngset = msng_filenode_set[f]
1508 hasset = {}
1505 hasset = {}
1509 # If a 'missing' filenode thinks it belongs to a changenode we
1506 # If a 'missing' filenode thinks it belongs to a changenode we
1510 # assume the recipient must have, then the recipient must have
1507 # assume the recipient must have, then the recipient must have
1511 # that filenode.
1508 # that filenode.
1512 for n in msngset:
1509 for n in msngset:
1513 clnode = cl.node(filerevlog.linkrev(n))
1510 clnode = cl.node(filerevlog.linkrev(n))
1514 if clnode in has_cl_set:
1511 if clnode in has_cl_set:
1515 hasset[n] = 1
1512 hasset[n] = 1
1516 prune_parents(filerevlog, hasset, msngset)
1513 prune_parents(filerevlog, hasset, msngset)
1517
1514
1518 # A function generator function that sets up the a context for the
1515 # A function generator function that sets up the a context for the
1519 # inner function.
1516 # inner function.
1520 def lookup_filenode_link_func(fname):
1517 def lookup_filenode_link_func(fname):
1521 msngset = msng_filenode_set[fname]
1518 msngset = msng_filenode_set[fname]
1522 # Lookup the changenode the filenode belongs to.
1519 # Lookup the changenode the filenode belongs to.
1523 def lookup_filenode_link(fnode):
1520 def lookup_filenode_link(fnode):
1524 return msngset[fnode]
1521 return msngset[fnode]
1525 return lookup_filenode_link
1522 return lookup_filenode_link
1526
1523
1527 # Now that we have all theses utility functions to help out and
1524 # Now that we have all theses utility functions to help out and
1528 # logically divide up the task, generate the group.
1525 # logically divide up the task, generate the group.
1529 def gengroup():
1526 def gengroup():
1530 # The set of changed files starts empty.
1527 # The set of changed files starts empty.
1531 changedfiles = {}
1528 changedfiles = {}
1532 # Create a changenode group generator that will call our functions
1529 # Create a changenode group generator that will call our functions
1533 # back to lookup the owning changenode and collect information.
1530 # back to lookup the owning changenode and collect information.
1534 group = cl.group(msng_cl_lst, identity,
1531 group = cl.group(msng_cl_lst, identity,
1535 manifest_and_file_collector(changedfiles))
1532 manifest_and_file_collector(changedfiles))
1536 for chnk in group:
1533 for chnk in group:
1537 yield chnk
1534 yield chnk
1538
1535
1539 # The list of manifests has been collected by the generator
1536 # The list of manifests has been collected by the generator
1540 # calling our functions back.
1537 # calling our functions back.
1541 prune_manifests()
1538 prune_manifests()
1542 msng_mnfst_lst = msng_mnfst_set.keys()
1539 msng_mnfst_lst = msng_mnfst_set.keys()
1543 # Sort the manifestnodes by revision number.
1540 # Sort the manifestnodes by revision number.
1544 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1541 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1545 # Create a generator for the manifestnodes that calls our lookup
1542 # Create a generator for the manifestnodes that calls our lookup
1546 # and data collection functions back.
1543 # and data collection functions back.
1547 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1544 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1548 filenode_collector(changedfiles))
1545 filenode_collector(changedfiles))
1549 for chnk in group:
1546 for chnk in group:
1550 yield chnk
1547 yield chnk
1551
1548
1552 # These are no longer needed, dereference and toss the memory for
1549 # These are no longer needed, dereference and toss the memory for
1553 # them.
1550 # them.
1554 msng_mnfst_lst = None
1551 msng_mnfst_lst = None
1555 msng_mnfst_set.clear()
1552 msng_mnfst_set.clear()
1556
1553
1557 changedfiles = changedfiles.keys()
1554 changedfiles = changedfiles.keys()
1558 changedfiles.sort()
1555 changedfiles.sort()
1559 # Go through all our files in order sorted by name.
1556 # Go through all our files in order sorted by name.
1560 for fname in changedfiles:
1557 for fname in changedfiles:
1561 filerevlog = self.file(fname)
1558 filerevlog = self.file(fname)
1562 # Toss out the filenodes that the recipient isn't really
1559 # Toss out the filenodes that the recipient isn't really
1563 # missing.
1560 # missing.
1564 if msng_filenode_set.has_key(fname):
1561 if msng_filenode_set.has_key(fname):
1565 prune_filenodes(fname, filerevlog)
1562 prune_filenodes(fname, filerevlog)
1566 msng_filenode_lst = msng_filenode_set[fname].keys()
1563 msng_filenode_lst = msng_filenode_set[fname].keys()
1567 else:
1564 else:
1568 msng_filenode_lst = []
1565 msng_filenode_lst = []
1569 # If any filenodes are left, generate the group for them,
1566 # If any filenodes are left, generate the group for them,
1570 # otherwise don't bother.
1567 # otherwise don't bother.
1571 if len(msng_filenode_lst) > 0:
1568 if len(msng_filenode_lst) > 0:
1572 yield changegroup.genchunk(fname)
1569 yield changegroup.genchunk(fname)
1573 # Sort the filenodes by their revision #
1570 # Sort the filenodes by their revision #
1574 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1571 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1575 # Create a group generator and only pass in a changenode
1572 # Create a group generator and only pass in a changenode
1576 # lookup function as we need to collect no information
1573 # lookup function as we need to collect no information
1577 # from filenodes.
1574 # from filenodes.
1578 group = filerevlog.group(msng_filenode_lst,
1575 group = filerevlog.group(msng_filenode_lst,
1579 lookup_filenode_link_func(fname))
1576 lookup_filenode_link_func(fname))
1580 for chnk in group:
1577 for chnk in group:
1581 yield chnk
1578 yield chnk
1582 if msng_filenode_set.has_key(fname):
1579 if msng_filenode_set.has_key(fname):
1583 # Don't need this anymore, toss it to free memory.
1580 # Don't need this anymore, toss it to free memory.
1584 del msng_filenode_set[fname]
1581 del msng_filenode_set[fname]
1585 # Signal that no more groups are left.
1582 # Signal that no more groups are left.
1586 yield changegroup.closechunk()
1583 yield changegroup.closechunk()
1587
1584
1588 if msng_cl_lst:
1585 if msng_cl_lst:
1589 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1586 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1590
1587
1591 return util.chunkbuffer(gengroup())
1588 return util.chunkbuffer(gengroup())
1592
1589
1593 def changegroup(self, basenodes, source):
1590 def changegroup(self, basenodes, source):
1594 """Generate a changegroup of all nodes that we have that a recipient
1591 """Generate a changegroup of all nodes that we have that a recipient
1595 doesn't.
1592 doesn't.
1596
1593
1597 This is much easier than the previous function as we can assume that
1594 This is much easier than the previous function as we can assume that
1598 the recipient has any changenode we aren't sending them."""
1595 the recipient has any changenode we aren't sending them."""
1599
1596
1600 self.hook('preoutgoing', throw=True, source=source)
1597 self.hook('preoutgoing', throw=True, source=source)
1601
1598
1602 cl = self.changelog
1599 cl = self.changelog
1603 nodes = cl.nodesbetween(basenodes, None)[0]
1600 nodes = cl.nodesbetween(basenodes, None)[0]
1604 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1601 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1605
1602
1606 def identity(x):
1603 def identity(x):
1607 return x
1604 return x
1608
1605
1609 def gennodelst(revlog):
1606 def gennodelst(revlog):
1610 for r in xrange(0, revlog.count()):
1607 for r in xrange(0, revlog.count()):
1611 n = revlog.node(r)
1608 n = revlog.node(r)
1612 if revlog.linkrev(n) in revset:
1609 if revlog.linkrev(n) in revset:
1613 yield n
1610 yield n
1614
1611
1615 def changed_file_collector(changedfileset):
1612 def changed_file_collector(changedfileset):
1616 def collect_changed_files(clnode):
1613 def collect_changed_files(clnode):
1617 c = cl.read(clnode)
1614 c = cl.read(clnode)
1618 for fname in c[3]:
1615 for fname in c[3]:
1619 changedfileset[fname] = 1
1616 changedfileset[fname] = 1
1620 return collect_changed_files
1617 return collect_changed_files
1621
1618
1622 def lookuprevlink_func(revlog):
1619 def lookuprevlink_func(revlog):
1623 def lookuprevlink(n):
1620 def lookuprevlink(n):
1624 return cl.node(revlog.linkrev(n))
1621 return cl.node(revlog.linkrev(n))
1625 return lookuprevlink
1622 return lookuprevlink
1626
1623
1627 def gengroup():
1624 def gengroup():
1628 # construct a list of all changed files
1625 # construct a list of all changed files
1629 changedfiles = {}
1626 changedfiles = {}
1630
1627
1631 for chnk in cl.group(nodes, identity,
1628 for chnk in cl.group(nodes, identity,
1632 changed_file_collector(changedfiles)):
1629 changed_file_collector(changedfiles)):
1633 yield chnk
1630 yield chnk
1634 changedfiles = changedfiles.keys()
1631 changedfiles = changedfiles.keys()
1635 changedfiles.sort()
1632 changedfiles.sort()
1636
1633
1637 mnfst = self.manifest
1634 mnfst = self.manifest
1638 nodeiter = gennodelst(mnfst)
1635 nodeiter = gennodelst(mnfst)
1639 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1636 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1640 yield chnk
1637 yield chnk
1641
1638
1642 for fname in changedfiles:
1639 for fname in changedfiles:
1643 filerevlog = self.file(fname)
1640 filerevlog = self.file(fname)
1644 nodeiter = gennodelst(filerevlog)
1641 nodeiter = gennodelst(filerevlog)
1645 nodeiter = list(nodeiter)
1642 nodeiter = list(nodeiter)
1646 if nodeiter:
1643 if nodeiter:
1647 yield changegroup.genchunk(fname)
1644 yield changegroup.genchunk(fname)
1648 lookup = lookuprevlink_func(filerevlog)
1645 lookup = lookuprevlink_func(filerevlog)
1649 for chnk in filerevlog.group(nodeiter, lookup):
1646 for chnk in filerevlog.group(nodeiter, lookup):
1650 yield chnk
1647 yield chnk
1651
1648
1652 yield changegroup.closechunk()
1649 yield changegroup.closechunk()
1653
1650
1654 if nodes:
1651 if nodes:
1655 self.hook('outgoing', node=hex(nodes[0]), source=source)
1652 self.hook('outgoing', node=hex(nodes[0]), source=source)
1656
1653
1657 return util.chunkbuffer(gengroup())
1654 return util.chunkbuffer(gengroup())
1658
1655
1659 def addchangegroup(self, source, srctype, url):
1656 def addchangegroup(self, source, srctype, url):
1660 """add changegroup to repo.
1657 """add changegroup to repo.
1661 returns number of heads modified or added + 1."""
1658 returns number of heads modified or added + 1."""
1662
1659
1663 def csmap(x):
1660 def csmap(x):
1664 self.ui.debug(_("add changeset %s\n") % short(x))
1661 self.ui.debug(_("add changeset %s\n") % short(x))
1665 return cl.count()
1662 return cl.count()
1666
1663
1667 def revmap(x):
1664 def revmap(x):
1668 return cl.rev(x)
1665 return cl.rev(x)
1669
1666
1670 if not source:
1667 if not source:
1671 return 0
1668 return 0
1672
1669
1673 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1670 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1674
1671
1675 changesets = files = revisions = 0
1672 changesets = files = revisions = 0
1676
1673
1677 tr = self.transaction()
1674 tr = self.transaction()
1678
1675
1679 # write changelog data to temp files so concurrent readers will not see
1676 # write changelog data to temp files so concurrent readers will not see
1680 # inconsistent view
1677 # inconsistent view
1681 cl = None
1678 cl = None
1682 try:
1679 try:
1683 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1680 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1684
1681
1685 oldheads = len(cl.heads())
1682 oldheads = len(cl.heads())
1686
1683
1687 # pull off the changeset group
1684 # pull off the changeset group
1688 self.ui.status(_("adding changesets\n"))
1685 self.ui.status(_("adding changesets\n"))
1689 cor = cl.count() - 1
1686 cor = cl.count() - 1
1690 chunkiter = changegroup.chunkiter(source)
1687 chunkiter = changegroup.chunkiter(source)
1691 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1688 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1692 raise util.Abort(_("received changelog group is empty"))
1689 raise util.Abort(_("received changelog group is empty"))
1693 cnr = cl.count() - 1
1690 cnr = cl.count() - 1
1694 changesets = cnr - cor
1691 changesets = cnr - cor
1695
1692
1696 # pull off the manifest group
1693 # pull off the manifest group
1697 self.ui.status(_("adding manifests\n"))
1694 self.ui.status(_("adding manifests\n"))
1698 chunkiter = changegroup.chunkiter(source)
1695 chunkiter = changegroup.chunkiter(source)
1699 # no need to check for empty manifest group here:
1696 # no need to check for empty manifest group here:
1700 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1697 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1701 # no new manifest will be created and the manifest group will
1698 # no new manifest will be created and the manifest group will
1702 # be empty during the pull
1699 # be empty during the pull
1703 self.manifest.addgroup(chunkiter, revmap, tr)
1700 self.manifest.addgroup(chunkiter, revmap, tr)
1704
1701
1705 # process the files
1702 # process the files
1706 self.ui.status(_("adding file changes\n"))
1703 self.ui.status(_("adding file changes\n"))
1707 while 1:
1704 while 1:
1708 f = changegroup.getchunk(source)
1705 f = changegroup.getchunk(source)
1709 if not f:
1706 if not f:
1710 break
1707 break
1711 self.ui.debug(_("adding %s revisions\n") % f)
1708 self.ui.debug(_("adding %s revisions\n") % f)
1712 fl = self.file(f)
1709 fl = self.file(f)
1713 o = fl.count()
1710 o = fl.count()
1714 chunkiter = changegroup.chunkiter(source)
1711 chunkiter = changegroup.chunkiter(source)
1715 if fl.addgroup(chunkiter, revmap, tr) is None:
1712 if fl.addgroup(chunkiter, revmap, tr) is None:
1716 raise util.Abort(_("received file revlog group is empty"))
1713 raise util.Abort(_("received file revlog group is empty"))
1717 revisions += fl.count() - o
1714 revisions += fl.count() - o
1718 files += 1
1715 files += 1
1719
1716
1720 cl.writedata()
1717 cl.writedata()
1721 finally:
1718 finally:
1722 if cl:
1719 if cl:
1723 cl.cleanup()
1720 cl.cleanup()
1724
1721
1725 # make changelog see real files again
1722 # make changelog see real files again
1726 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1723 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1727 self.changelog.checkinlinesize(tr)
1724 self.changelog.checkinlinesize(tr)
1728
1725
1729 newheads = len(self.changelog.heads())
1726 newheads = len(self.changelog.heads())
1730 heads = ""
1727 heads = ""
1731 if oldheads and newheads != oldheads:
1728 if oldheads and newheads != oldheads:
1732 heads = _(" (%+d heads)") % (newheads - oldheads)
1729 heads = _(" (%+d heads)") % (newheads - oldheads)
1733
1730
1734 self.ui.status(_("added %d changesets"
1731 self.ui.status(_("added %d changesets"
1735 " with %d changes to %d files%s\n")
1732 " with %d changes to %d files%s\n")
1736 % (changesets, revisions, files, heads))
1733 % (changesets, revisions, files, heads))
1737
1734
1738 if changesets > 0:
1735 if changesets > 0:
1739 self.hook('pretxnchangegroup', throw=True,
1736 self.hook('pretxnchangegroup', throw=True,
1740 node=hex(self.changelog.node(cor+1)), source=srctype,
1737 node=hex(self.changelog.node(cor+1)), source=srctype,
1741 url=url)
1738 url=url)
1742
1739
1743 tr.close()
1740 tr.close()
1744
1741
1745 if changesets > 0:
1742 if changesets > 0:
1746 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1743 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1747 source=srctype, url=url)
1744 source=srctype, url=url)
1748
1745
1749 for i in range(cor + 1, cnr + 1):
1746 for i in range(cor + 1, cnr + 1):
1750 self.hook("incoming", node=hex(self.changelog.node(i)),
1747 self.hook("incoming", node=hex(self.changelog.node(i)),
1751 source=srctype, url=url)
1748 source=srctype, url=url)
1752
1749
1753 return newheads - oldheads + 1
1750 return newheads - oldheads + 1
1754
1751
1755
1752
1756 def stream_in(self, remote):
1753 def stream_in(self, remote):
1757 fp = remote.stream_out()
1754 fp = remote.stream_out()
1758 resp = int(fp.readline())
1755 resp = int(fp.readline())
1759 if resp != 0:
1756 if resp != 0:
1760 raise util.Abort(_('operation forbidden by server'))
1757 raise util.Abort(_('operation forbidden by server'))
1761 self.ui.status(_('streaming all changes\n'))
1758 self.ui.status(_('streaming all changes\n'))
1762 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1759 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1763 self.ui.status(_('%d files to transfer, %s of data\n') %
1760 self.ui.status(_('%d files to transfer, %s of data\n') %
1764 (total_files, util.bytecount(total_bytes)))
1761 (total_files, util.bytecount(total_bytes)))
1765 start = time.time()
1762 start = time.time()
1766 for i in xrange(total_files):
1763 for i in xrange(total_files):
1767 name, size = fp.readline().split('\0', 1)
1764 name, size = fp.readline().split('\0', 1)
1768 size = int(size)
1765 size = int(size)
1769 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1766 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1770 ofp = self.opener(name, 'w')
1767 ofp = self.opener(name, 'w')
1771 for chunk in util.filechunkiter(fp, limit=size):
1768 for chunk in util.filechunkiter(fp, limit=size):
1772 ofp.write(chunk)
1769 ofp.write(chunk)
1773 ofp.close()
1770 ofp.close()
1774 elapsed = time.time() - start
1771 elapsed = time.time() - start
1775 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1772 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1776 (util.bytecount(total_bytes), elapsed,
1773 (util.bytecount(total_bytes), elapsed,
1777 util.bytecount(total_bytes / elapsed)))
1774 util.bytecount(total_bytes / elapsed)))
1778 self.reload()
1775 self.reload()
1779 return len(self.heads()) + 1
1776 return len(self.heads()) + 1
1780
1777
1781 def clone(self, remote, heads=[], stream=False):
1778 def clone(self, remote, heads=[], stream=False):
1782 '''clone remote repository.
1779 '''clone remote repository.
1783
1780
1784 keyword arguments:
1781 keyword arguments:
1785 heads: list of revs to clone (forces use of pull)
1782 heads: list of revs to clone (forces use of pull)
1786 stream: use streaming clone if possible'''
1783 stream: use streaming clone if possible'''
1787
1784
1788 # now, all clients that can request uncompressed clones can
1785 # now, all clients that can request uncompressed clones can
1789 # read repo formats supported by all servers that can serve
1786 # read repo formats supported by all servers that can serve
1790 # them.
1787 # them.
1791
1788
1792 # if revlog format changes, client will have to check version
1789 # if revlog format changes, client will have to check version
1793 # and format flags on "stream" capability, and use
1790 # and format flags on "stream" capability, and use
1794 # uncompressed only if compatible.
1791 # uncompressed only if compatible.
1795
1792
1796 if stream and not heads and remote.capable('stream'):
1793 if stream and not heads and remote.capable('stream'):
1797 return self.stream_in(remote)
1794 return self.stream_in(remote)
1798 return self.pull(remote, heads)
1795 return self.pull(remote, heads)
1799
1796
1800 # used to avoid circular references so destructors work
1797 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the journal files under *base* to
    their undo names (journal -> undo, journal.dirstate -> undo.dirstate).

    A plain closure is used instead of a bound method so the transaction
    does not keep a reference cycle alive.
    """
    journal_dir = base
    def rename_journal():
        util.rename(os.path.join(journal_dir, "journal"),
                    os.path.join(journal_dir, "undo"))
        util.rename(os.path.join(journal_dir, "journal.dirstate"),
                    os.path.join(journal_dir, "undo.dirstate"))
    return rename_journal
1804 return a
1808
1805
def instance(ui, path, create):
    """Repository factory hook: open (or create) a local repository at
    *path*, stripping any leading ``file:`` scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1811
1808
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now