##// END OF EJS Templates
Improve branch cache sanity check for mq
Matt Mackall -
r3443:e6045fc3 default
parent child Browse files
Show More
@@ -1,1810 +1,1811
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ()
18 capabilities = ()
19
19
def __del__(self):
    """Drop the transaction handle reference when the repo object dies."""
    self.transhandle = None
def __init__(self, parentui, path=None, create=0):
    """Open the repository at path, or create it when create is true.

    With no path, walk upward from the current directory until a .hg
    directory is found.  Raises repo.RepoError when no repository exists
    (or, with create, when one already does).
    """
    repo.repository.__init__(self)
    if not path:
        # search upward from the cwd for a directory containing .hg
        p = os.getcwd()
        while not os.path.isdir(os.path.join(p, ".hg")):
            oldp = p
            p = os.path.dirname(p)
            if p == oldp:
                raise repo.RepoError(_("There is no Mercurial repository"
                                       " here (.hg not found)"))
        path = p
    self.path = os.path.join(path, ".hg")

    if not os.path.isdir(self.path):
        if create:
            if not os.path.exists(path):
                os.mkdir(path)
            os.mkdir(self.path)
            os.mkdir(self.join("data"))
        else:
            raise repo.RepoError(_("repository %s not found") % path)
    elif create:
        raise repo.RepoError(_("repository %s already exists") % path)

    self.root = os.path.abspath(path)
    self.origroot = path
    self.ui = ui.ui(parentui=parentui)
    # opener reads/writes under .hg, wopener under the working directory
    self.opener = util.opener(self.path)
    self.wopener = util.opener(self.root)

    # a missing per-repo hgrc is perfectly fine
    try:
        self.ui.readconfig(self.join("hgrc"), self.root)
    except IOError:
        pass

    # pick the revlog format/flags from configuration
    v = self.ui.configrevlog()
    self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
    self.revlogv1 = self.revlogversion != revlog.REVLOGV0
    fl = v.get('flags', None)
    flags = 0
    if fl != None:
        for x in fl.split():
            flags |= revlog.flagstr(x)
    elif self.revlogv1:
        flags = revlog.REVLOG_DEFAULT_FLAGS

    v = self.revlogversion | flags
    self.manifest = manifest.manifest(self.opener, v)
    self.changelog = changelog.changelog(self.opener, v)

    # the changelog might not have the inline index flag
    # on. If the format of the changelog is the same as found in
    # .hgrc, apply any flags found in the .hgrc as well.
    # Otherwise, just version from the changelog
    v = self.changelog.version
    if v == self.revlogversion:
        v |= flags
    self.revlogversion = v

    # lazily-built caches; reload() invalidates (most of) them
    self.tagscache = None
    self.branchcache = None
    self.nodetagscache = None
    self.encodepats = None
    self.decodepats = None
    self.transhandle = None

    self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
def url(self):
    """Return this repository's URL; local repositories use file: scheme."""
    return 'file:' + self.root
92
92
93 def hook(self, name, throw=False, **args):
93 def hook(self, name, throw=False, **args):
94 def callhook(hname, funcname):
94 def callhook(hname, funcname):
95 '''call python hook. hook is callable object, looked up as
95 '''call python hook. hook is callable object, looked up as
96 name in python module. if callable returns "true", hook
96 name in python module. if callable returns "true", hook
97 fails, else passes. if hook raises exception, treated as
97 fails, else passes. if hook raises exception, treated as
98 hook failure. exception propagates if throw is "true".
98 hook failure. exception propagates if throw is "true".
99
99
100 reason for "true" meaning "hook failed" is so that
100 reason for "true" meaning "hook failed" is so that
101 unmodified commands (e.g. mercurial.commands.update) can
101 unmodified commands (e.g. mercurial.commands.update) can
102 be run as hooks without wrappers to convert return values.'''
102 be run as hooks without wrappers to convert return values.'''
103
103
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 d = funcname.rfind('.')
105 d = funcname.rfind('.')
106 if d == -1:
106 if d == -1:
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 % (hname, funcname))
108 % (hname, funcname))
109 modname = funcname[:d]
109 modname = funcname[:d]
110 try:
110 try:
111 obj = __import__(modname)
111 obj = __import__(modname)
112 except ImportError:
112 except ImportError:
113 try:
113 try:
114 # extensions are loaded with hgext_ prefix
114 # extensions are loaded with hgext_ prefix
115 obj = __import__("hgext_%s" % modname)
115 obj = __import__("hgext_%s" % modname)
116 except ImportError:
116 except ImportError:
117 raise util.Abort(_('%s hook is invalid '
117 raise util.Abort(_('%s hook is invalid '
118 '(import of "%s" failed)') %
118 '(import of "%s" failed)') %
119 (hname, modname))
119 (hname, modname))
120 try:
120 try:
121 for p in funcname.split('.')[1:]:
121 for p in funcname.split('.')[1:]:
122 obj = getattr(obj, p)
122 obj = getattr(obj, p)
123 except AttributeError, err:
123 except AttributeError, err:
124 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not defined)') %
125 '("%s" is not defined)') %
126 (hname, funcname))
126 (hname, funcname))
127 if not callable(obj):
127 if not callable(obj):
128 raise util.Abort(_('%s hook is invalid '
128 raise util.Abort(_('%s hook is invalid '
129 '("%s" is not callable)') %
129 '("%s" is not callable)') %
130 (hname, funcname))
130 (hname, funcname))
131 try:
131 try:
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 except (KeyboardInterrupt, util.SignalInterrupt):
133 except (KeyboardInterrupt, util.SignalInterrupt):
134 raise
134 raise
135 except Exception, exc:
135 except Exception, exc:
136 if isinstance(exc, util.Abort):
136 if isinstance(exc, util.Abort):
137 self.ui.warn(_('error: %s hook failed: %s\n') %
137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 (hname, exc.args[0]))
138 (hname, exc.args[0]))
139 else:
139 else:
140 self.ui.warn(_('error: %s hook raised an exception: '
140 self.ui.warn(_('error: %s hook raised an exception: '
141 '%s\n') % (hname, exc))
141 '%s\n') % (hname, exc))
142 if throw:
142 if throw:
143 raise
143 raise
144 self.ui.print_exc()
144 self.ui.print_exc()
145 return True
145 return True
146 if r:
146 if r:
147 if throw:
147 if throw:
148 raise util.Abort(_('%s hook failed') % hname)
148 raise util.Abort(_('%s hook failed') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 return r
150 return r
151
151
152 def runhook(name, cmd):
152 def runhook(name, cmd):
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 r = util.system(cmd, environ=env, cwd=self.root)
155 r = util.system(cmd, environ=env, cwd=self.root)
156 if r:
156 if r:
157 desc, r = util.explain_exit(r)
157 desc, r = util.explain_exit(r)
158 if throw:
158 if throw:
159 raise util.Abort(_('%s hook %s') % (name, desc))
159 raise util.Abort(_('%s hook %s') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 return r
161 return r
162
162
163 r = False
163 r = False
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 if hname.split(".", 1)[0] == name and cmd]
165 if hname.split(".", 1)[0] == name and cmd]
166 hooks.sort()
166 hooks.sort()
167 for hname, cmd in hooks:
167 for hname, cmd in hooks:
168 if cmd.startswith('python:'):
168 if cmd.startswith('python:'):
169 r = callhook(hname, cmd[7:].strip()) or r
169 r = callhook(hname, cmd[7:].strip()) or r
170 else:
170 else:
171 r = runhook(hname, cmd) or r
171 r = runhook(hname, cmd) or r
172 return r
172 return r
173
173
tag_disallowed = ':\r\n'

def tag(self, name, node, message, local, user, date):
    '''tag a revision with a symbolic name.

    if local is True, the tag is stored in a per-repository file.
    otherwise, it is stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tag in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    # refuse characters that would corrupt the tags file format
    for c in self.tag_disallowed:
        if c in name:
            raise util.Abort(_('%r cannot be used in a tag name') % c)

    self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

    if local:
        # local tags live in .hg/localtags and are never committed
        self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
        self.hook('tag', node=hex(node), tag=name, local=local)
        return

    # never commit on top of uncommitted .hgtags edits
    for x in self.status()[:5]:
        if '.hgtags' in x:
            raise util.Abort(_('working copy of .hgtags is changed '
                               '(please commit .hgtags manually)'))

    self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
    if self.dirstate.state('.hgtags') == '?':
        self.add(['.hgtags'])

    self.commit(['.hgtags'], message, user, date)
    self.hook('tag', node=hex(node), tag=name, local=local)
216
216
def tags(self):
    '''return a mapping of tag to node'''
    if not self.tagscache:
        self.tagscache = {}

        def parsetag(line, context):
            # fix: parse the 'line' argument itself.  The old code did
            # 's = l.split(...)', silently reading the enclosing loop
            # variable by closure, which only worked by accident because
            # every caller happened to pass that same variable.
            if not line:
                return
            s = line.split(" ", 1)
            if len(s) != 2:
                self.ui.warn(_("%s: cannot parse entry\n") % context)
                return
            node, key = s
            key = key.strip()
            try:
                bin_n = bin(node)
            except TypeError:
                self.ui.warn(_("%s: node '%s' is not well formed\n") %
                             (context, node))
                return
            if bin_n not in self.changelog.nodemap:
                self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
                             (context, key))
                return
            self.tagscache[key] = bin_n

        # read the tags file from each head, ending with the tip,
        # and add each tag found to the map, with "newer" ones
        # taking precedence
        heads = self.heads()
        heads.reverse()
        fl = self.file(".hgtags")
        for node in heads:
            change = self.changelog.read(node)
            rev = self.changelog.rev(node)
            fn, ff = self.manifest.find(change[0], '.hgtags')
            if fn is None: continue
            count = 0
            for l in fl.read(fn).splitlines():
                count += 1
                parsetag(l, _(".hgtags (rev %d:%s), line %d") %
                         (rev, short(node), count))
        # purely-local tags override everything else
        try:
            f = self.opener("localtags")
            count = 0
            for l in f:
                count += 1
                parsetag(l, _("localtags, line %d") % count)
        except IOError:
            pass

        self.tagscache['tip'] = self.changelog.tip()

    return self.tagscache
271
271
def tagslist(self):
    '''return a list of tags ordered by revision'''
    entries = []
    for name, node in self.tags().items():
        try:
            rev = self.changelog.rev(node)
        except:
            rev = -2  # sort to the beginning of the list if unknown
        entries.append((rev, name, node))
    entries.sort()
    return [(name, node) for rev, name, node in entries]
283
283
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self.nodetagscache:
        # invert the tag -> node mapping into node -> [tags]
        cache = {}
        for t, n in self.tags().items():
            cache.setdefault(n, []).append(t)
        self.nodetagscache = cache
    return self.nodetagscache.get(node, [])
291
291
def branchtags(self):
    """Return a mapping of branch name -> tip-most node on that branch.

    Results are memoized in self.branchcache and persisted to the
    branches.cache file; only revisions newer than the cached tip are
    rescanned.
    """
    if self.branchcache != None:
        return self.branchcache

    self.branchcache = {} # avoid recursion in changectx

    try:
        f = self.opener("branches.cache")
        last, lrev = f.readline().rstrip().split(" ", 1)
        last, lrev = bin(last), int(lrev)
        # sanity check: with mq, the changelog may have been stripped
        # below the cached revision, so verify lrev is still in range
        # before asking the changelog for its node (the old check
        # called changelog.node(lrev) unconditionally)
        if (lrev < self.changelog.count() and
            self.changelog.node(lrev) == last):
            for l in f:
                node, label = l.rstrip().split(" ", 1)
                self.branchcache[label] = bin(node)
        f.close()
    except IOError:
        last, lrev = nullid, -1
        lrev = self.changelog.rev(last)

    # scan any revisions the cache doesn't cover and refresh it
    tip = self.changelog.count() - 1
    if lrev != tip:
        for r in xrange(lrev + 1, tip + 1):
            c = self.changectx(r)
            b = c.branch()
            if b:
                self.branchcache[b] = c.node()
        self._writebranchcache()

    return self.branchcache
321
322
def _writebranchcache(self):
    """Persist self.branchcache: a 'tipnode tiprev' header line followed
    by one 'node label' line per branch."""
    f = self.opener("branches.cache", "w")
    t = self.changelog.tip()
    f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
    for label, node in self.branchcache.items():
        f.write("%s %s\n" % (hex(node), label))
328
329
def lookup(self, key):
    """Resolve key to a changelog node.

    Accepts '.' (first parent of the working directory), a tag name,
    a branch name, or anything changelog.lookup understands; raises
    repo.RepoError when nothing matches.
    """
    if key == '.':
        key = self.dirstate.parents()[0]
        if key == nullid:
            raise repo.RepoError(_("no revision checked out"))
    if key in self.tags():
        return self.tags()[key]
    if key in self.branchtags():
        return self.branchtags()[key]
    try:
        return self.changelog.lookup(key)
    except:
        raise repo.RepoError(_("unknown revision '%s'") % key)
342
343
def dev(self):
    """Return the device number of the filesystem holding .hg
    (lstat, so a symlinked .hg is not dereferenced)."""
    return os.lstat(self.path).st_dev
345
346
def local(self):
    """This repository is local, by definition."""
    return True
348
349
def join(self, f):
    """Join f onto the .hg directory path."""
    return os.path.join(self.path, f)
351
352
def wjoin(self, f):
    """Join f onto the working-directory root."""
    return os.path.join(self.root, f)
354
355
def file(self, f):
    """Return the filelog for tracked file f (a leading '/' is stripped)."""
    if f[0] == '/':
        f = f[1:]
    return filelog.filelog(self.opener, f, self.revlogversion)
359
360
def changectx(self, changeid=None):
    """Return a changectx for changeid; interpretation of changeid
    (including None) is delegated to context.changectx."""
    return context.changectx(self, changeid)
362
363
def workingctx(self):
    """Return a context object for the working directory."""
    return context.workingctx(self)
365
366
def parents(self, changeid=None):
    '''
    get list of changectxs for parents of changeid or working directory
    '''
    if changeid is None:
        pl = self.dirstate.parents()
    else:
        n = self.changelog.lookup(changeid)
        pl = self.changelog.parents(n)
    # drop a null second parent so callers get one entry, not two
    if pl[1] == nullid:
        return [self.changectx(pl[0])]
    return [self.changectx(pl[0]), self.changectx(pl[1])]
378
379
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
383
384
def getcwd(self):
    """Return the current directory as the dirstate sees it
    (relative to the repository root)."""
    return self.dirstate.getcwd()
386
387
def wfile(self, f, mode='r'):
    """Open file f relative to the working directory."""
    return self.wopener(f, mode)
389
390
def wread(self, filename):
    """Read filename from the working directory, passing it through the
    first matching [encode] filter, if any."""
    if self.encodepats == None:
        # build the (matcher, command) list lazily, once per repo
        patterns = []
        for pat, cmd in self.ui.configitems("encode"):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            patterns.append((mf, cmd))
        self.encodepats = patterns

    data = self.wopener(filename, 'r').read()

    for mf, cmd in self.encodepats:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = util.filter(data, cmd)
            break

    return data
407
408
def wwrite(self, filename, data, fd=None):
    """Write data to filename in the working directory, passing it
    through the first matching [decode] filter.  When fd is given the
    data is written to it instead of opening the file."""
    if self.decodepats == None:
        # build the (matcher, command) list lazily, once per repo
        patterns = []
        for pat, cmd in self.ui.configitems("decode"):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            patterns.append((mf, cmd))
        self.decodepats = patterns

    for mf, cmd in self.decodepats:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = util.filter(data, cmd)
            break

    if fd:
        return fd.write(data)
    return self.wopener(filename, 'w').write(data)
425
426
def transaction(self):
    """Return a transaction; nest into the running one when present."""
    tr = self.transhandle
    if tr != None and tr.running():
        return tr.nest()

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)

    tr = transaction.transaction(self.ui.warn, self.opener,
                                 self.join("journal"),
                                 aftertrans(self.path))
    self.transhandle = tr
    return tr
443
444
def recover(self):
    """Roll back an interrupted transaction.

    Returns True when a journal was found and rolled back, False (after
    a warning) otherwise.
    """
    l = self.lock()
    if os.path.exists(self.join("journal")):
        self.ui.status(_("rolling back interrupted transaction\n"))
        transaction.rollback(self.opener, self.join("journal"))
        self.reload()
        return True
    self.ui.warn(_("no interrupted transaction available\n"))
    return False
454
455
def rollback(self, wlock=None):
    """Undo the last transaction, restoring the saved dirstate.

    Takes the working-directory lock (unless one is passed in) and the
    store lock; warns when there is nothing to roll back.
    """
    if not wlock:
        wlock = self.wlock()
    l = self.lock()
    if os.path.exists(self.join("undo")):
        self.ui.status(_("rolling back last transaction\n"))
        transaction.rollback(self.opener, self.join("undo"))
        util.rename(self.join("undo.dirstate"), self.join("dirstate"))
        self.reload()
        self.wreload()
    else:
        self.ui.warn(_("no rollback information available\n"))
467
468
def wreload(self):
    """Re-read the dirstate from disk."""
    self.dirstate.read()
470
471
def reload(self):
    """Re-read the changelog and manifest and drop the tag caches."""
    self.changelog.load()
    self.manifest.load()
    self.tagscache = None
    self.nodetagscache = None
476
477
477 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
478 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
478 desc=None):
479 desc=None):
479 try:
480 try:
480 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
481 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
481 except lock.LockHeld, inst:
482 except lock.LockHeld, inst:
482 if not wait:
483 if not wait:
483 raise
484 raise
484 self.ui.warn(_("waiting for lock on %s held by %s\n") %
485 self.ui.warn(_("waiting for lock on %s held by %s\n") %
485 (desc, inst.args[0]))
486 (desc, inst.args[0]))
486 # default to 600 seconds timeout
487 # default to 600 seconds timeout
487 l = lock.lock(self.join(lockname),
488 l = lock.lock(self.join(lockname),
488 int(self.ui.config("ui", "timeout") or 600),
489 int(self.ui.config("ui", "timeout") or 600),
489 releasefn, desc=desc)
490 releasefn, desc=desc)
490 if acquirefn:
491 if acquirefn:
491 acquirefn()
492 acquirefn()
492 return l
493 return l
493
494
def lock(self, wait=1):
    """Acquire the repository (store) lock; reloads caches on acquire."""
    return self.do_lock("lock", wait, acquirefn=self.reload,
                        desc=_('repository %s') % self.origroot)
497
498
def wlock(self, wait=1):
    """Acquire the working-directory lock; the dirstate is flushed on
    release and re-read on acquire."""
    return self.do_lock("wlock", wait, self.dirstate.write,
                        self.wreload,
                        desc=_('working directory of %s') % self.origroot)
502
503
def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
    """
    commit an individual file as part of a larger transaction

    Returns the new filelog node (or the existing one when the file is
    unchanged from its parent); appends fn to changelist when a new
    revision was actually added.
    """
    t = self.wread(fn)
    fl = self.file(fn)
    fp1 = manifest1.get(fn, nullid)
    fp2 = manifest2.get(fn, nullid)

    meta = {}
    cp = self.dirstate.copied(fn)
    if cp:
        # record copy/rename metadata in the filelog entry
        meta["copy"] = cp
        if not manifest2: # not a branch merge
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
            fp2 = nullid
        elif fp2 != nullid: # copied on remote side
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        else: # copied on local side, reversed
            # NOTE(review): unlike the branches above, no nullid default
            # here -- hex() would fail if cp were missing from manifest2;
            # presumably the merge machinery guarantees it is present
            meta["copyrev"] = hex(manifest2.get(cp))
            fp2 = nullid
        self.ui.debug(_(" %s: copy %s:%s\n") %
                      (fn, cp, meta["copyrev"]))
        fp1 = nullid
    elif fp2 != nullid:
        # is one parent an ancestor of the other?
        fpa = fl.ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, nullid
        elif fpa == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and not fl.cmp(fp1, t):
        return fp1

    changelist.append(fn)
    return fl.add(t, meta, transaction, linkrev, fp1, fp2)
542
543
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files directly, bypassing working-dir status.

        p1 and p2 default to the current dirstate parents.  Files that
        cannot be read from the working directory (IOError) are treated
        as removed.  The dirstate is only moved onto the new changeset
        when p1 is the current first dirstate parent; otherwise the
        working directory is considered unrelated and left alone.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []
        removed = []

        # only track the dirstate when committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                try:
                    # unreadable in the working dir: record as a removal
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                    removed.append(f)
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed + removed, text,
                               tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
585
586
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit working-directory changes as a new changeset.

        If 'files' is given, only those files are considered (dirstate
        states n/m/a/i are committed, 'r' is removed); otherwise a full
        status against 'match' decides.  Fires the precommit,
        pretxncommit and commit hooks.  Returns the new changeset node,
        or None when nothing changed or the commit message ends up empty.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.status(match=match)[:5]
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        branchname = self.workingctx().branch()
        oldname = c1[5].get("branch", "")

        # a branch-name change alone is enough reason to commit
        if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        extra = {}
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
697
698
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (src, filename) pairs for files accepted by 'match'.

        With a node, walk that revision's manifest: matched files are
        yielded with src 'm'; requested names absent from the revision
        go through badmatch (yielded with src 'b') or produce a warning.
        Without a node, defer to the dirstate walk of the working dir.

        NOTE(review): the mutable default files=[] is shared across
        calls; it is only read here, so this is safe but fragile.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe in py2: we break right after mutating
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # names left in fdict were requested but not in the revision
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
719
720
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def fcmp(fn, mf):
            # true if the working copy of fn differs from its entry in mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of 'node' restricted to files accepted by 'match'
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # best effort: without the lock we simply skip
                    # recording clean files back into the dirstate below
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 is in node1 but not in node2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
816
817
817 def add(self, list, wlock=None):
818 def add(self, list, wlock=None):
818 if not wlock:
819 if not wlock:
819 wlock = self.wlock()
820 wlock = self.wlock()
820 for f in list:
821 for f in list:
821 p = self.wjoin(f)
822 p = self.wjoin(f)
822 if not os.path.exists(p):
823 if not os.path.exists(p):
823 self.ui.warn(_("%s does not exist!\n") % f)
824 self.ui.warn(_("%s does not exist!\n") % f)
824 elif not os.path.isfile(p):
825 elif not os.path.isfile(p):
825 self.ui.warn(_("%s not added: only files supported currently\n")
826 self.ui.warn(_("%s not added: only files supported currently\n")
826 % f)
827 % f)
827 elif self.dirstate.state(f) in 'an':
828 elif self.dirstate.state(f) in 'an':
828 self.ui.warn(_("%s already tracked!\n") % f)
829 self.ui.warn(_("%s already tracked!\n") % f)
829 else:
830 else:
830 self.dirstate.update([f], "a")
831 self.dirstate.update([f], "a")
831
832
832 def forget(self, list, wlock=None):
833 def forget(self, list, wlock=None):
833 if not wlock:
834 if not wlock:
834 wlock = self.wlock()
835 wlock = self.wlock()
835 for f in list:
836 for f in list:
836 if self.dirstate.state(f) not in 'ai':
837 if self.dirstate.state(f) not in 'ai':
837 self.ui.warn(_("%s not added!\n") % f)
838 self.ui.warn(_("%s not added!\n") % f)
838 else:
839 else:
839 self.dirstate.forget([f])
840 self.dirstate.forget([f])
840
841
841 def remove(self, list, unlink=False, wlock=None):
842 def remove(self, list, unlink=False, wlock=None):
842 if unlink:
843 if unlink:
843 for f in list:
844 for f in list:
844 try:
845 try:
845 util.unlink(self.wjoin(f))
846 util.unlink(self.wjoin(f))
846 except OSError, inst:
847 except OSError, inst:
847 if inst.errno != errno.ENOENT:
848 if inst.errno != errno.ENOENT:
848 raise
849 raise
849 if not wlock:
850 if not wlock:
850 wlock = self.wlock()
851 wlock = self.wlock()
851 for f in list:
852 for f in list:
852 p = self.wjoin(f)
853 p = self.wjoin(f)
853 if os.path.exists(p):
854 if os.path.exists(p):
854 self.ui.warn(_("%s still exists!\n") % f)
855 self.ui.warn(_("%s still exists!\n") % f)
855 elif self.dirstate.state(f) == 'a':
856 elif self.dirstate.state(f) == 'a':
856 self.dirstate.forget([f])
857 self.dirstate.forget([f])
857 elif f not in self.dirstate:
858 elif f not in self.dirstate:
858 self.ui.warn(_("%s not tracked!\n") % f)
859 self.ui.warn(_("%s not tracked!\n") % f)
859 else:
860 else:
860 self.dirstate.update([f], "r")
861 self.dirstate.update([f], "r")
861
862
862 def undelete(self, list, wlock=None):
863 def undelete(self, list, wlock=None):
863 p = self.dirstate.parents()[0]
864 p = self.dirstate.parents()[0]
864 mn = self.changelog.read(p)[0]
865 mn = self.changelog.read(p)[0]
865 m = self.manifest.read(mn)
866 m = self.manifest.read(mn)
866 if not wlock:
867 if not wlock:
867 wlock = self.wlock()
868 wlock = self.wlock()
868 for f in list:
869 for f in list:
869 if self.dirstate.state(f) not in "r":
870 if self.dirstate.state(f) not in "r":
870 self.ui.warn("%s not removed!\n" % f)
871 self.ui.warn("%s not removed!\n" % f)
871 else:
872 else:
872 t = self.file(f).read(m[f])
873 t = self.file(f).read(m[f])
873 self.wwrite(f, t)
874 self.wwrite(f, t)
874 util.set_exec(self.wjoin(f), m.execf(f))
875 util.set_exec(self.wjoin(f), m.execf(f))
875 self.dirstate.update([f], "n")
876 self.dirstate.update([f], "n")
876
877
877 def copy(self, source, dest, wlock=None):
878 def copy(self, source, dest, wlock=None):
878 p = self.wjoin(dest)
879 p = self.wjoin(dest)
879 if not os.path.exists(p):
880 if not os.path.exists(p):
880 self.ui.warn(_("%s does not exist!\n") % dest)
881 self.ui.warn(_("%s does not exist!\n") % dest)
881 elif not os.path.isfile(p):
882 elif not os.path.isfile(p):
882 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
883 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
883 else:
884 else:
884 if not wlock:
885 if not wlock:
885 wlock = self.wlock()
886 wlock = self.wlock()
886 if self.dirstate.state(dest) == '?':
887 if self.dirstate.state(dest) == '?':
887 self.dirstate.update([dest], "a")
888 self.dirstate.update([dest], "a")
888 self.dirstate.copy(source, dest)
889 self.dirstate.copy(source, dest)
889
890
890 def heads(self, start=None):
891 def heads(self, start=None):
891 heads = self.changelog.heads(start)
892 heads = self.changelog.heads(start)
892 # sort the output in rev descending order
893 # sort the output in rev descending order
893 heads = [(-self.changelog.rev(h), h) for h in heads]
894 heads = [(-self.changelog.rev(h), h) for h in heads]
894 heads.sort()
895 heads.sort()
895 return [n for (r, n) in heads]
896 return [n for (r, n) in heads]
896
897
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to the list of branch tags visible from it.

        See the comment block above for the elimination rules.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a second-parent branch with the tags found so far
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tag as visible from every node
                        # collected so far on the way down
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tagged nodes reachable from 'node', memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1002
1003
1003 def branches(self, nodes):
1004 def branches(self, nodes):
1004 if not nodes:
1005 if not nodes:
1005 nodes = [self.changelog.tip()]
1006 nodes = [self.changelog.tip()]
1006 b = []
1007 b = []
1007 for n in nodes:
1008 for n in nodes:
1008 t = n
1009 t = n
1009 while 1:
1010 while 1:
1010 p = self.changelog.parents(n)
1011 p = self.changelog.parents(n)
1011 if p[1] != nullid or p[0] == nullid:
1012 if p[1] != nullid or p[0] == nullid:
1012 b.append((t, n, p[0], p[1]))
1013 b.append((t, n, p[0], p[1]))
1013 break
1014 break
1014 n = p[0]
1015 n = p[0]
1015 return b
1016 return b
1016
1017
1017 def between(self, pairs):
1018 def between(self, pairs):
1018 r = []
1019 r = []
1019
1020
1020 for top, bottom in pairs:
1021 for top, bottom in pairs:
1021 n, l, i = top, [], 0
1022 n, l, i = top, [], 0
1022 f = 1
1023 f = 1
1023
1024
1024 while n != bottom:
1025 while n != bottom:
1025 p = self.changelog.parents(n)[0]
1026 p = self.changelog.parents(n)[0]
1026 if i == f:
1027 if i == f:
1027 l.append(n)
1028 l.append(n)
1028 f = f * 2
1029 f = f * 2
1029 n = p
1030 n = p
1030 i += 1
1031 i += 1
1031
1032
1032 r.append(l)
1033 r.append(l)
1033
1034
1034 return r
1035 return r
1035
1036
1036 def findincoming(self, remote, base=None, heads=None, force=False):
1037 def findincoming(self, remote, base=None, heads=None, force=False):
1037 """Return list of roots of the subsets of missing nodes from remote
1038 """Return list of roots of the subsets of missing nodes from remote
1038
1039
1039 If base dict is specified, assume that these nodes and their parents
1040 If base dict is specified, assume that these nodes and their parents
1040 exist on the remote side and that no child of a node of base exists
1041 exist on the remote side and that no child of a node of base exists
1041 in both remote and self.
1042 in both remote and self.
1042 Furthermore base will be updated to include the nodes that exists
1043 Furthermore base will be updated to include the nodes that exists
1043 in self and remote but no children exists in self and remote.
1044 in self and remote but no children exists in self and remote.
1044 If a list of heads is specified, return only nodes which are heads
1045 If a list of heads is specified, return only nodes which are heads
1045 or ancestors of these heads.
1046 or ancestors of these heads.
1046
1047
1047 All the ancestors of base are in self and in remote.
1048 All the ancestors of base are in self and in remote.
1048 All the descendants of the list returned are missing in self.
1049 All the descendants of the list returned are missing in self.
1049 (and so we know that the rest of the nodes are missing in remote, see
1050 (and so we know that the rest of the nodes are missing in remote, see
1050 outgoing)
1051 outgoing)
1051 """
1052 """
1052 m = self.changelog.nodemap
1053 m = self.changelog.nodemap
1053 search = []
1054 search = []
1054 fetch = {}
1055 fetch = {}
1055 seen = {}
1056 seen = {}
1056 seenbranch = {}
1057 seenbranch = {}
1057 if base == None:
1058 if base == None:
1058 base = {}
1059 base = {}
1059
1060
1060 if not heads:
1061 if not heads:
1061 heads = remote.heads()
1062 heads = remote.heads()
1062
1063
1063 if self.changelog.tip() == nullid:
1064 if self.changelog.tip() == nullid:
1064 base[nullid] = 1
1065 base[nullid] = 1
1065 if heads != [nullid]:
1066 if heads != [nullid]:
1066 return [nullid]
1067 return [nullid]
1067 return []
1068 return []
1068
1069
1069 # assume we're closer to the tip than the root
1070 # assume we're closer to the tip than the root
1070 # and start by examining the heads
1071 # and start by examining the heads
1071 self.ui.status(_("searching for changes\n"))
1072 self.ui.status(_("searching for changes\n"))
1072
1073
1073 unknown = []
1074 unknown = []
1074 for h in heads:
1075 for h in heads:
1075 if h not in m:
1076 if h not in m:
1076 unknown.append(h)
1077 unknown.append(h)
1077 else:
1078 else:
1078 base[h] = 1
1079 base[h] = 1
1079
1080
1080 if not unknown:
1081 if not unknown:
1081 return []
1082 return []
1082
1083
1083 req = dict.fromkeys(unknown)
1084 req = dict.fromkeys(unknown)
1084 reqcnt = 0
1085 reqcnt = 0
1085
1086
1086 # search through remote branches
1087 # search through remote branches
1087 # a 'branch' here is a linear segment of history, with four parts:
1088 # a 'branch' here is a linear segment of history, with four parts:
1088 # head, root, first parent, second parent
1089 # head, root, first parent, second parent
1089 # (a branch always has two parents (or none) by definition)
1090 # (a branch always has two parents (or none) by definition)
1090 unknown = remote.branches(unknown)
1091 unknown = remote.branches(unknown)
1091 while unknown:
1092 while unknown:
1092 r = []
1093 r = []
1093 while unknown:
1094 while unknown:
1094 n = unknown.pop(0)
1095 n = unknown.pop(0)
1095 if n[0] in seen:
1096 if n[0] in seen:
1096 continue
1097 continue
1097
1098
1098 self.ui.debug(_("examining %s:%s\n")
1099 self.ui.debug(_("examining %s:%s\n")
1099 % (short(n[0]), short(n[1])))
1100 % (short(n[0]), short(n[1])))
1100 if n[0] == nullid: # found the end of the branch
1101 if n[0] == nullid: # found the end of the branch
1101 pass
1102 pass
1102 elif n in seenbranch:
1103 elif n in seenbranch:
1103 self.ui.debug(_("branch already found\n"))
1104 self.ui.debug(_("branch already found\n"))
1104 continue
1105 continue
1105 elif n[1] and n[1] in m: # do we know the base?
1106 elif n[1] and n[1] in m: # do we know the base?
1106 self.ui.debug(_("found incomplete branch %s:%s\n")
1107 self.ui.debug(_("found incomplete branch %s:%s\n")
1107 % (short(n[0]), short(n[1])))
1108 % (short(n[0]), short(n[1])))
1108 search.append(n) # schedule branch range for scanning
1109 search.append(n) # schedule branch range for scanning
1109 seenbranch[n] = 1
1110 seenbranch[n] = 1
1110 else:
1111 else:
1111 if n[1] not in seen and n[1] not in fetch:
1112 if n[1] not in seen and n[1] not in fetch:
1112 if n[2] in m and n[3] in m:
1113 if n[2] in m and n[3] in m:
1113 self.ui.debug(_("found new changeset %s\n") %
1114 self.ui.debug(_("found new changeset %s\n") %
1114 short(n[1]))
1115 short(n[1]))
1115 fetch[n[1]] = 1 # earliest unknown
1116 fetch[n[1]] = 1 # earliest unknown
1116 for p in n[2:4]:
1117 for p in n[2:4]:
1117 if p in m:
1118 if p in m:
1118 base[p] = 1 # latest known
1119 base[p] = 1 # latest known
1119
1120
1120 for p in n[2:4]:
1121 for p in n[2:4]:
1121 if p not in req and p not in m:
1122 if p not in req and p not in m:
1122 r.append(p)
1123 r.append(p)
1123 req[p] = 1
1124 req[p] = 1
1124 seen[n[0]] = 1
1125 seen[n[0]] = 1
1125
1126
1126 if r:
1127 if r:
1127 reqcnt += 1
1128 reqcnt += 1
1128 self.ui.debug(_("request %d: %s\n") %
1129 self.ui.debug(_("request %d: %s\n") %
1129 (reqcnt, " ".join(map(short, r))))
1130 (reqcnt, " ".join(map(short, r))))
1130 for p in range(0, len(r), 10):
1131 for p in range(0, len(r), 10):
1131 for b in remote.branches(r[p:p+10]):
1132 for b in remote.branches(r[p:p+10]):
1132 self.ui.debug(_("received %s:%s\n") %
1133 self.ui.debug(_("received %s:%s\n") %
1133 (short(b[0]), short(b[1])))
1134 (short(b[0]), short(b[1])))
1134 unknown.append(b)
1135 unknown.append(b)
1135
1136
1136 # do binary search on the branches we found
1137 # do binary search on the branches we found
1137 while search:
1138 while search:
1138 n = search.pop(0)
1139 n = search.pop(0)
1139 reqcnt += 1
1140 reqcnt += 1
1140 l = remote.between([(n[0], n[1])])[0]
1141 l = remote.between([(n[0], n[1])])[0]
1141 l.append(n[1])
1142 l.append(n[1])
1142 p = n[0]
1143 p = n[0]
1143 f = 1
1144 f = 1
1144 for i in l:
1145 for i in l:
1145 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1146 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1146 if i in m:
1147 if i in m:
1147 if f <= 2:
1148 if f <= 2:
1148 self.ui.debug(_("found new branch changeset %s\n") %
1149 self.ui.debug(_("found new branch changeset %s\n") %
1149 short(p))
1150 short(p))
1150 fetch[p] = 1
1151 fetch[p] = 1
1151 base[i] = 1
1152 base[i] = 1
1152 else:
1153 else:
1153 self.ui.debug(_("narrowed branch search to %s:%s\n")
1154 self.ui.debug(_("narrowed branch search to %s:%s\n")
1154 % (short(p), short(i)))
1155 % (short(p), short(i)))
1155 search.append((p, i))
1156 search.append((p, i))
1156 break
1157 break
1157 p, f = i, f * 2
1158 p, f = i, f * 2
1158
1159
1159 # sanity check our fetch list
1160 # sanity check our fetch list
1160 for f in fetch.keys():
1161 for f in fetch.keys():
1161 if f in m:
1162 if f in m:
1162 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1163 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1163
1164
1164 if base.keys() == [nullid]:
1165 if base.keys() == [nullid]:
1165 if force:
1166 if force:
1166 self.ui.warn(_("warning: repository is unrelated\n"))
1167 self.ui.warn(_("warning: repository is unrelated\n"))
1167 else:
1168 else:
1168 raise util.Abort(_("repository is unrelated"))
1169 raise util.Abort(_("repository is unrelated"))
1169
1170
1170 self.ui.debug(_("found new changesets starting at ") +
1171 self.ui.debug(_("found new changesets starting at ") +
1171 " ".join([short(f) for f in fetch]) + "\n")
1172 " ".join([short(f) for f in fetch]) + "\n")
1172
1173
1173 self.ui.debug(_("%d total queries\n") % reqcnt)
1174 self.ui.debug(_("%d total queries\n") % reqcnt)
1174
1175
1175 return fetch.keys()
1176 return fetch.keys()
1176
1177
1177 def findoutgoing(self, remote, base=None, heads=None, force=False):
1178 def findoutgoing(self, remote, base=None, heads=None, force=False):
1178 """Return list of nodes that are roots of subsets not in remote
1179 """Return list of nodes that are roots of subsets not in remote
1179
1180
1180 If base dict is specified, assume that these nodes and their parents
1181 If base dict is specified, assume that these nodes and their parents
1181 exist on the remote side.
1182 exist on the remote side.
1182 If a list of heads is specified, return only nodes which are heads
1183 If a list of heads is specified, return only nodes which are heads
1183 or ancestors of these heads, and return a second element which
1184 or ancestors of these heads, and return a second element which
1184 contains all remote heads which get new children.
1185 contains all remote heads which get new children.
1185 """
1186 """
1186 if base == None:
1187 if base == None:
1187 base = {}
1188 base = {}
1188 self.findincoming(remote, base, heads, force=force)
1189 self.findincoming(remote, base, heads, force=force)
1189
1190
1190 self.ui.debug(_("common changesets up to ")
1191 self.ui.debug(_("common changesets up to ")
1191 + " ".join(map(short, base.keys())) + "\n")
1192 + " ".join(map(short, base.keys())) + "\n")
1192
1193
1193 remain = dict.fromkeys(self.changelog.nodemap)
1194 remain = dict.fromkeys(self.changelog.nodemap)
1194
1195
1195 # prune everything remote has from the tree
1196 # prune everything remote has from the tree
1196 del remain[nullid]
1197 del remain[nullid]
1197 remove = base.keys()
1198 remove = base.keys()
1198 while remove:
1199 while remove:
1199 n = remove.pop(0)
1200 n = remove.pop(0)
1200 if n in remain:
1201 if n in remain:
1201 del remain[n]
1202 del remain[n]
1202 for p in self.changelog.parents(n):
1203 for p in self.changelog.parents(n):
1203 remove.append(p)
1204 remove.append(p)
1204
1205
1205 # find every node whose parents have been pruned
1206 # find every node whose parents have been pruned
1206 subset = []
1207 subset = []
1207 # find every remote head that will get new children
1208 # find every remote head that will get new children
1208 updated_heads = {}
1209 updated_heads = {}
1209 for n in remain:
1210 for n in remain:
1210 p1, p2 = self.changelog.parents(n)
1211 p1, p2 = self.changelog.parents(n)
1211 if p1 not in remain and p2 not in remain:
1212 if p1 not in remain and p2 not in remain:
1212 subset.append(n)
1213 subset.append(n)
1213 if heads:
1214 if heads:
1214 if p1 in heads:
1215 if p1 in heads:
1215 updated_heads[p1] = True
1216 updated_heads[p1] = True
1216 if p2 in heads:
1217 if p2 in heads:
1217 updated_heads[p2] = True
1218 updated_heads[p2] = True
1218
1219
1219 # this is the set of all roots we have to push
1220 # this is the set of all roots we have to push
1220 if heads:
1221 if heads:
1221 return subset, updated_heads.keys()
1222 return subset, updated_heads.keys()
1222 else:
1223 else:
1223 return subset
1224 return subset
1224
1225
1225 def pull(self, remote, heads=None, force=False, lock=None):
1226 def pull(self, remote, heads=None, force=False, lock=None):
1226 mylock = False
1227 mylock = False
1227 if not lock:
1228 if not lock:
1228 lock = self.lock()
1229 lock = self.lock()
1229 mylock = True
1230 mylock = True
1230
1231
1231 try:
1232 try:
1232 fetch = self.findincoming(remote, force=force)
1233 fetch = self.findincoming(remote, force=force)
1233 if fetch == [nullid]:
1234 if fetch == [nullid]:
1234 self.ui.status(_("requesting all changes\n"))
1235 self.ui.status(_("requesting all changes\n"))
1235
1236
1236 if not fetch:
1237 if not fetch:
1237 self.ui.status(_("no changes found\n"))
1238 self.ui.status(_("no changes found\n"))
1238 return 0
1239 return 0
1239
1240
1240 if heads is None:
1241 if heads is None:
1241 cg = remote.changegroup(fetch, 'pull')
1242 cg = remote.changegroup(fetch, 'pull')
1242 else:
1243 else:
1243 cg = remote.changegroupsubset(fetch, heads, 'pull')
1244 cg = remote.changegroupsubset(fetch, heads, 'pull')
1244 return self.addchangegroup(cg, 'pull', remote.url())
1245 return self.addchangegroup(cg, 'pull', remote.url())
1245 finally:
1246 finally:
1246 if mylock:
1247 if mylock:
1247 lock.release()
1248 lock.release()
1248
1249
1249 def push(self, remote, force=False, revs=None):
1250 def push(self, remote, force=False, revs=None):
1250 # there are two ways to push to remote repo:
1251 # there are two ways to push to remote repo:
1251 #
1252 #
1252 # addchangegroup assumes local user can lock remote
1253 # addchangegroup assumes local user can lock remote
1253 # repo (local filesystem, old ssh servers).
1254 # repo (local filesystem, old ssh servers).
1254 #
1255 #
1255 # unbundle assumes local user cannot lock remote repo (new ssh
1256 # unbundle assumes local user cannot lock remote repo (new ssh
1256 # servers, http servers).
1257 # servers, http servers).
1257
1258
1258 if remote.capable('unbundle'):
1259 if remote.capable('unbundle'):
1259 return self.push_unbundle(remote, force, revs)
1260 return self.push_unbundle(remote, force, revs)
1260 return self.push_addchangegroup(remote, force, revs)
1261 return self.push_addchangegroup(remote, force, revs)
1261
1262
1262 def prepush(self, remote, force, revs):
1263 def prepush(self, remote, force, revs):
1263 base = {}
1264 base = {}
1264 remote_heads = remote.heads()
1265 remote_heads = remote.heads()
1265 inc = self.findincoming(remote, base, remote_heads, force=force)
1266 inc = self.findincoming(remote, base, remote_heads, force=force)
1266 if not force and inc:
1267 if not force and inc:
1267 self.ui.warn(_("abort: unsynced remote changes!\n"))
1268 self.ui.warn(_("abort: unsynced remote changes!\n"))
1268 self.ui.status(_("(did you forget to sync?"
1269 self.ui.status(_("(did you forget to sync?"
1269 " use push -f to force)\n"))
1270 " use push -f to force)\n"))
1270 return None, 1
1271 return None, 1
1271
1272
1272 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1273 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1273 if revs is not None:
1274 if revs is not None:
1274 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1275 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1275 else:
1276 else:
1276 bases, heads = update, self.changelog.heads()
1277 bases, heads = update, self.changelog.heads()
1277
1278
1278 if not bases:
1279 if not bases:
1279 self.ui.status(_("no changes found\n"))
1280 self.ui.status(_("no changes found\n"))
1280 return None, 1
1281 return None, 1
1281 elif not force:
1282 elif not force:
1282 # FIXME we don't properly detect creation of new heads
1283 # FIXME we don't properly detect creation of new heads
1283 # in the push -r case, assume the user knows what he's doing
1284 # in the push -r case, assume the user knows what he's doing
1284 if not revs and len(remote_heads) < len(heads) \
1285 if not revs and len(remote_heads) < len(heads) \
1285 and remote_heads != [nullid]:
1286 and remote_heads != [nullid]:
1286 self.ui.warn(_("abort: push creates new remote branches!\n"))
1287 self.ui.warn(_("abort: push creates new remote branches!\n"))
1287 self.ui.status(_("(did you forget to merge?"
1288 self.ui.status(_("(did you forget to merge?"
1288 " use push -f to force)\n"))
1289 " use push -f to force)\n"))
1289 return None, 1
1290 return None, 1
1290
1291
1291 if revs is None:
1292 if revs is None:
1292 cg = self.changegroup(update, 'push')
1293 cg = self.changegroup(update, 'push')
1293 else:
1294 else:
1294 cg = self.changegroupsubset(update, revs, 'push')
1295 cg = self.changegroupsubset(update, revs, 'push')
1295 return cg, remote_heads
1296 return cg, remote_heads
1296
1297
1297 def push_addchangegroup(self, remote, force, revs):
1298 def push_addchangegroup(self, remote, force, revs):
1298 lock = remote.lock()
1299 lock = remote.lock()
1299
1300
1300 ret = self.prepush(remote, force, revs)
1301 ret = self.prepush(remote, force, revs)
1301 if ret[0] is not None:
1302 if ret[0] is not None:
1302 cg, remote_heads = ret
1303 cg, remote_heads = ret
1303 return remote.addchangegroup(cg, 'push', self.url())
1304 return remote.addchangegroup(cg, 'push', self.url())
1304 return ret[1]
1305 return ret[1]
1305
1306
1306 def push_unbundle(self, remote, force, revs):
1307 def push_unbundle(self, remote, force, revs):
1307 # local repo finds heads on server, finds out what revs it
1308 # local repo finds heads on server, finds out what revs it
1308 # must push. once revs transferred, if server finds it has
1309 # must push. once revs transferred, if server finds it has
1309 # different heads (someone else won commit/push race), server
1310 # different heads (someone else won commit/push race), server
1310 # aborts.
1311 # aborts.
1311
1312
1312 ret = self.prepush(remote, force, revs)
1313 ret = self.prepush(remote, force, revs)
1313 if ret[0] is not None:
1314 if ret[0] is not None:
1314 cg, remote_heads = ret
1315 cg, remote_heads = ret
1315 if force: remote_heads = ['force']
1316 if force: remote_heads = ['force']
1316 return remote.unbundle(cg, remote_heads, 'push')
1317 return remote.unbundle(cg, remote_heads, 'push')
1317 return ret[1]
1318 return ret[1]
1318
1319
1319 def changegroupsubset(self, bases, heads, source):
1320 def changegroupsubset(self, bases, heads, source):
1320 """This function generates a changegroup consisting of all the nodes
1321 """This function generates a changegroup consisting of all the nodes
1321 that are descendents of any of the bases, and ancestors of any of
1322 that are descendents of any of the bases, and ancestors of any of
1322 the heads.
1323 the heads.
1323
1324
1324 It is fairly complex as determining which filenodes and which
1325 It is fairly complex as determining which filenodes and which
1325 manifest nodes need to be included for the changeset to be complete
1326 manifest nodes need to be included for the changeset to be complete
1326 is non-trivial.
1327 is non-trivial.
1327
1328
1328 Another wrinkle is doing the reverse, figuring out which changeset in
1329 Another wrinkle is doing the reverse, figuring out which changeset in
1329 the changegroup a particular filenode or manifestnode belongs to."""
1330 the changegroup a particular filenode or manifestnode belongs to."""
1330
1331
1331 self.hook('preoutgoing', throw=True, source=source)
1332 self.hook('preoutgoing', throw=True, source=source)
1332
1333
1333 # Set up some initial variables
1334 # Set up some initial variables
1334 # Make it easy to refer to self.changelog
1335 # Make it easy to refer to self.changelog
1335 cl = self.changelog
1336 cl = self.changelog
1336 # msng is short for missing - compute the list of changesets in this
1337 # msng is short for missing - compute the list of changesets in this
1337 # changegroup.
1338 # changegroup.
1338 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1339 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1339 # Some bases may turn out to be superfluous, and some heads may be
1340 # Some bases may turn out to be superfluous, and some heads may be
1340 # too. nodesbetween will return the minimal set of bases and heads
1341 # too. nodesbetween will return the minimal set of bases and heads
1341 # necessary to re-create the changegroup.
1342 # necessary to re-create the changegroup.
1342
1343
1343 # Known heads are the list of heads that it is assumed the recipient
1344 # Known heads are the list of heads that it is assumed the recipient
1344 # of this changegroup will know about.
1345 # of this changegroup will know about.
1345 knownheads = {}
1346 knownheads = {}
1346 # We assume that all parents of bases are known heads.
1347 # We assume that all parents of bases are known heads.
1347 for n in bases:
1348 for n in bases:
1348 for p in cl.parents(n):
1349 for p in cl.parents(n):
1349 if p != nullid:
1350 if p != nullid:
1350 knownheads[p] = 1
1351 knownheads[p] = 1
1351 knownheads = knownheads.keys()
1352 knownheads = knownheads.keys()
1352 if knownheads:
1353 if knownheads:
1353 # Now that we know what heads are known, we can compute which
1354 # Now that we know what heads are known, we can compute which
1354 # changesets are known. The recipient must know about all
1355 # changesets are known. The recipient must know about all
1355 # changesets required to reach the known heads from the null
1356 # changesets required to reach the known heads from the null
1356 # changeset.
1357 # changeset.
1357 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1358 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1358 junk = None
1359 junk = None
1359 # Transform the list into an ersatz set.
1360 # Transform the list into an ersatz set.
1360 has_cl_set = dict.fromkeys(has_cl_set)
1361 has_cl_set = dict.fromkeys(has_cl_set)
1361 else:
1362 else:
1362 # If there were no known heads, the recipient cannot be assumed to
1363 # If there were no known heads, the recipient cannot be assumed to
1363 # know about any changesets.
1364 # know about any changesets.
1364 has_cl_set = {}
1365 has_cl_set = {}
1365
1366
1366 # Make it easy to refer to self.manifest
1367 # Make it easy to refer to self.manifest
1367 mnfst = self.manifest
1368 mnfst = self.manifest
1368 # We don't know which manifests are missing yet
1369 # We don't know which manifests are missing yet
1369 msng_mnfst_set = {}
1370 msng_mnfst_set = {}
1370 # Nor do we know which filenodes are missing.
1371 # Nor do we know which filenodes are missing.
1371 msng_filenode_set = {}
1372 msng_filenode_set = {}
1372
1373
1373 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1374 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1374 junk = None
1375 junk = None
1375
1376
1376 # A changeset always belongs to itself, so the changenode lookup
1377 # A changeset always belongs to itself, so the changenode lookup
1377 # function for a changenode is identity.
1378 # function for a changenode is identity.
1378 def identity(x):
1379 def identity(x):
1379 return x
1380 return x
1380
1381
1381 # A function generating function. Sets up an environment for the
1382 # A function generating function. Sets up an environment for the
1382 # inner function.
1383 # inner function.
1383 def cmp_by_rev_func(revlog):
1384 def cmp_by_rev_func(revlog):
1384 # Compare two nodes by their revision number in the environment's
1385 # Compare two nodes by their revision number in the environment's
1385 # revision history. Since the revision number both represents the
1386 # revision history. Since the revision number both represents the
1386 # most efficient order to read the nodes in, and represents a
1387 # most efficient order to read the nodes in, and represents a
1387 # topological sorting of the nodes, this function is often useful.
1388 # topological sorting of the nodes, this function is often useful.
1388 def cmp_by_rev(a, b):
1389 def cmp_by_rev(a, b):
1389 return cmp(revlog.rev(a), revlog.rev(b))
1390 return cmp(revlog.rev(a), revlog.rev(b))
1390 return cmp_by_rev
1391 return cmp_by_rev
1391
1392
1392 # If we determine that a particular file or manifest node must be a
1393 # If we determine that a particular file or manifest node must be a
1393 # node that the recipient of the changegroup will already have, we can
1394 # node that the recipient of the changegroup will already have, we can
1394 # also assume the recipient will have all the parents. This function
1395 # also assume the recipient will have all the parents. This function
1395 # prunes them from the set of missing nodes.
1396 # prunes them from the set of missing nodes.
1396 def prune_parents(revlog, hasset, msngset):
1397 def prune_parents(revlog, hasset, msngset):
1397 haslst = hasset.keys()
1398 haslst = hasset.keys()
1398 haslst.sort(cmp_by_rev_func(revlog))
1399 haslst.sort(cmp_by_rev_func(revlog))
1399 for node in haslst:
1400 for node in haslst:
1400 parentlst = [p for p in revlog.parents(node) if p != nullid]
1401 parentlst = [p for p in revlog.parents(node) if p != nullid]
1401 while parentlst:
1402 while parentlst:
1402 n = parentlst.pop()
1403 n = parentlst.pop()
1403 if n not in hasset:
1404 if n not in hasset:
1404 hasset[n] = 1
1405 hasset[n] = 1
1405 p = [p for p in revlog.parents(n) if p != nullid]
1406 p = [p for p in revlog.parents(n) if p != nullid]
1406 parentlst.extend(p)
1407 parentlst.extend(p)
1407 for n in hasset:
1408 for n in hasset:
1408 msngset.pop(n, None)
1409 msngset.pop(n, None)
1409
1410
1410 # This is a function generating function used to set up an environment
1411 # This is a function generating function used to set up an environment
1411 # for the inner function to execute in.
1412 # for the inner function to execute in.
1412 def manifest_and_file_collector(changedfileset):
1413 def manifest_and_file_collector(changedfileset):
1413 # This is an information gathering function that gathers
1414 # This is an information gathering function that gathers
1414 # information from each changeset node that goes out as part of
1415 # information from each changeset node that goes out as part of
1415 # the changegroup. The information gathered is a list of which
1416 # the changegroup. The information gathered is a list of which
1416 # manifest nodes are potentially required (the recipient may
1417 # manifest nodes are potentially required (the recipient may
1417 # already have them) and total list of all files which were
1418 # already have them) and total list of all files which were
1418 # changed in any changeset in the changegroup.
1419 # changed in any changeset in the changegroup.
1419 #
1420 #
1420 # We also remember the first changenode we saw any manifest
1421 # We also remember the first changenode we saw any manifest
1421 # referenced by so we can later determine which changenode 'owns'
1422 # referenced by so we can later determine which changenode 'owns'
1422 # the manifest.
1423 # the manifest.
1423 def collect_manifests_and_files(clnode):
1424 def collect_manifests_and_files(clnode):
1424 c = cl.read(clnode)
1425 c = cl.read(clnode)
1425 for f in c[3]:
1426 for f in c[3]:
1426 # This is to make sure we only have one instance of each
1427 # This is to make sure we only have one instance of each
1427 # filename string for each filename.
1428 # filename string for each filename.
1428 changedfileset.setdefault(f, f)
1429 changedfileset.setdefault(f, f)
1429 msng_mnfst_set.setdefault(c[0], clnode)
1430 msng_mnfst_set.setdefault(c[0], clnode)
1430 return collect_manifests_and_files
1431 return collect_manifests_and_files
1431
1432
1432 # Figure out which manifest nodes (of the ones we think might be part
1433 # Figure out which manifest nodes (of the ones we think might be part
1433 # of the changegroup) the recipient must know about and remove them
1434 # of the changegroup) the recipient must know about and remove them
1434 # from the changegroup.
1435 # from the changegroup.
1435 def prune_manifests():
1436 def prune_manifests():
1436 has_mnfst_set = {}
1437 has_mnfst_set = {}
1437 for n in msng_mnfst_set:
1438 for n in msng_mnfst_set:
1438 # If a 'missing' manifest thinks it belongs to a changenode
1439 # If a 'missing' manifest thinks it belongs to a changenode
1439 # the recipient is assumed to have, obviously the recipient
1440 # the recipient is assumed to have, obviously the recipient
1440 # must have that manifest.
1441 # must have that manifest.
1441 linknode = cl.node(mnfst.linkrev(n))
1442 linknode = cl.node(mnfst.linkrev(n))
1442 if linknode in has_cl_set:
1443 if linknode in has_cl_set:
1443 has_mnfst_set[n] = 1
1444 has_mnfst_set[n] = 1
1444 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1445 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1445
1446
1446 # Use the information collected in collect_manifests_and_files to say
1447 # Use the information collected in collect_manifests_and_files to say
1447 # which changenode any manifestnode belongs to.
1448 # which changenode any manifestnode belongs to.
1448 def lookup_manifest_link(mnfstnode):
1449 def lookup_manifest_link(mnfstnode):
1449 return msng_mnfst_set[mnfstnode]
1450 return msng_mnfst_set[mnfstnode]
1450
1451
1451 # A function generating function that sets up the initial environment
1452 # A function generating function that sets up the initial environment
1452 # the inner function.
1453 # the inner function.
1453 def filenode_collector(changedfiles):
1454 def filenode_collector(changedfiles):
1454 next_rev = [0]
1455 next_rev = [0]
1455 # This gathers information from each manifestnode included in the
1456 # This gathers information from each manifestnode included in the
1456 # changegroup about which filenodes the manifest node references
1457 # changegroup about which filenodes the manifest node references
1457 # so we can include those in the changegroup too.
1458 # so we can include those in the changegroup too.
1458 #
1459 #
1459 # It also remembers which changenode each filenode belongs to. It
1460 # It also remembers which changenode each filenode belongs to. It
1460 # does this by assuming the a filenode belongs to the changenode
1461 # does this by assuming the a filenode belongs to the changenode
1461 # the first manifest that references it belongs to.
1462 # the first manifest that references it belongs to.
1462 def collect_msng_filenodes(mnfstnode):
1463 def collect_msng_filenodes(mnfstnode):
1463 r = mnfst.rev(mnfstnode)
1464 r = mnfst.rev(mnfstnode)
1464 if r == next_rev[0]:
1465 if r == next_rev[0]:
1465 # If the last rev we looked at was the one just previous,
1466 # If the last rev we looked at was the one just previous,
1466 # we only need to see a diff.
1467 # we only need to see a diff.
1467 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1468 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1468 # For each line in the delta
1469 # For each line in the delta
1469 for dline in delta.splitlines():
1470 for dline in delta.splitlines():
1470 # get the filename and filenode for that line
1471 # get the filename and filenode for that line
1471 f, fnode = dline.split('\0')
1472 f, fnode = dline.split('\0')
1472 fnode = bin(fnode[:40])
1473 fnode = bin(fnode[:40])
1473 f = changedfiles.get(f, None)
1474 f = changedfiles.get(f, None)
1474 # And if the file is in the list of files we care
1475 # And if the file is in the list of files we care
1475 # about.
1476 # about.
1476 if f is not None:
1477 if f is not None:
1477 # Get the changenode this manifest belongs to
1478 # Get the changenode this manifest belongs to
1478 clnode = msng_mnfst_set[mnfstnode]
1479 clnode = msng_mnfst_set[mnfstnode]
1479 # Create the set of filenodes for the file if
1480 # Create the set of filenodes for the file if
1480 # there isn't one already.
1481 # there isn't one already.
1481 ndset = msng_filenode_set.setdefault(f, {})
1482 ndset = msng_filenode_set.setdefault(f, {})
1482 # And set the filenode's changelog node to the
1483 # And set the filenode's changelog node to the
1483 # manifest's if it hasn't been set already.
1484 # manifest's if it hasn't been set already.
1484 ndset.setdefault(fnode, clnode)
1485 ndset.setdefault(fnode, clnode)
1485 else:
1486 else:
1486 # Otherwise we need a full manifest.
1487 # Otherwise we need a full manifest.
1487 m = mnfst.read(mnfstnode)
1488 m = mnfst.read(mnfstnode)
1488 # For every file in we care about.
1489 # For every file in we care about.
1489 for f in changedfiles:
1490 for f in changedfiles:
1490 fnode = m.get(f, None)
1491 fnode = m.get(f, None)
1491 # If it's in the manifest
1492 # If it's in the manifest
1492 if fnode is not None:
1493 if fnode is not None:
1493 # See comments above.
1494 # See comments above.
1494 clnode = msng_mnfst_set[mnfstnode]
1495 clnode = msng_mnfst_set[mnfstnode]
1495 ndset = msng_filenode_set.setdefault(f, {})
1496 ndset = msng_filenode_set.setdefault(f, {})
1496 ndset.setdefault(fnode, clnode)
1497 ndset.setdefault(fnode, clnode)
1497 # Remember the revision we hope to see next.
1498 # Remember the revision we hope to see next.
1498 next_rev[0] = r + 1
1499 next_rev[0] = r + 1
1499 return collect_msng_filenodes
1500 return collect_msng_filenodes
1500
1501
1501 # We have a list of filenodes we think we need for a file, lets remove
1502 # We have a list of filenodes we think we need for a file, lets remove
1502 # all those we now the recipient must have.
1503 # all those we now the recipient must have.
1503 def prune_filenodes(f, filerevlog):
1504 def prune_filenodes(f, filerevlog):
1504 msngset = msng_filenode_set[f]
1505 msngset = msng_filenode_set[f]
1505 hasset = {}
1506 hasset = {}
1506 # If a 'missing' filenode thinks it belongs to a changenode we
1507 # If a 'missing' filenode thinks it belongs to a changenode we
1507 # assume the recipient must have, then the recipient must have
1508 # assume the recipient must have, then the recipient must have
1508 # that filenode.
1509 # that filenode.
1509 for n in msngset:
1510 for n in msngset:
1510 clnode = cl.node(filerevlog.linkrev(n))
1511 clnode = cl.node(filerevlog.linkrev(n))
1511 if clnode in has_cl_set:
1512 if clnode in has_cl_set:
1512 hasset[n] = 1
1513 hasset[n] = 1
1513 prune_parents(filerevlog, hasset, msngset)
1514 prune_parents(filerevlog, hasset, msngset)
1514
1515
1515 # A function generator function that sets up the a context for the
1516 # A function generator function that sets up the a context for the
1516 # inner function.
1517 # inner function.
1517 def lookup_filenode_link_func(fname):
1518 def lookup_filenode_link_func(fname):
1518 msngset = msng_filenode_set[fname]
1519 msngset = msng_filenode_set[fname]
1519 # Lookup the changenode the filenode belongs to.
1520 # Lookup the changenode the filenode belongs to.
1520 def lookup_filenode_link(fnode):
1521 def lookup_filenode_link(fnode):
1521 return msngset[fnode]
1522 return msngset[fnode]
1522 return lookup_filenode_link
1523 return lookup_filenode_link
1523
1524
1524 # Now that we have all theses utility functions to help out and
1525 # Now that we have all theses utility functions to help out and
1525 # logically divide up the task, generate the group.
1526 # logically divide up the task, generate the group.
        def gengroup():
            """Yield the changegroup chunks for the missing changesets.

            Emission order is the changegroup wire format: changelog
            group, then manifest group, then one named group per
            changed file, then a closing chunk.  Do not reorder.
            """
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            # Iterate files in sorted-name order so the receiver sees a
            # deterministic stream.
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    # A file group begins with a chunk naming the file.
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
1584
1585
1585 if msng_cl_lst:
1586 if msng_cl_lst:
1586 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1587 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1587
1588
1588 return util.chunkbuffer(gengroup())
1589 return util.chunkbuffer(gengroup())
1589
1590
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        basenodes: changelog nodes the recipient already has; everything
        descending from (but excluding) them is bundled.
        source: opaque tag passed to the 'preoutgoing'/'outgoing' hooks.

        Returns a util.chunkbuffer wrapping a lazy generator of
        changegroup chunks (changelog, then manifests, then per-file
        groups, then a closing chunk).

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        # Give hooks a chance to veto the outgoing transfer.
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All changelog nodes to send, and the set of their revision
        # numbers for fast linkrev membership tests below.
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # Changelog nodes are their own link nodes.
            return x

        def gennodelst(revlog):
            # Yield the nodes of 'revlog' whose linked changeset is being
            # sent, in revision (storage) order.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Build a callback that records, for each changeset sent, the
            # files it touched (c[3] is the files list of a changelog entry).
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Build a callback mapping a node of 'revlog' to the changelog
            # node it was introduced by.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # Chunk order below is the changegroup wire format; do not
            # reorder the yields.
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            # Sorted file names give a deterministic stream.
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so we can test for emptiness before naming
                # the file group on the wire.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Empty chunk: end of the changegroup.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1655
1656
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source: a chunk stream as produced by changegroup()/chunkbuffer.
        srctype/url: passed through to the changegroup-related hooks.
        """

        def csmap(x):
            # Linkrev callback for incoming changesets; cl.count() is the
            # revision number the node being added will receive.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map a changelog node to its (already assigned) revision.
            return cl.rev(x)

        if not source:
            return 0

        # Let hooks veto the incoming transfer before touching the store.
        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog tip revision before/after the group.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # Each file group is preceded by a chunk naming the file;
                # an empty chunk terminates the stream.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # Flush the buffered changelog so it becomes visible atomically.
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # Hook runs before the transaction commits, so a raising hook
            # still rolls everything back.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # Post-commit notification hooks: one 'changegroup' for the
            # batch, one 'incoming' per new changeset.
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # "+ 1" keeps the result truthy even when the head count is
        # unchanged, so callers can use 0 as a failure sentinel.
        return newheads - oldheads + 1
1751
1752
1752
1753
1753 def stream_in(self, remote):
1754 def stream_in(self, remote):
1754 fp = remote.stream_out()
1755 fp = remote.stream_out()
1755 resp = int(fp.readline())
1756 resp = int(fp.readline())
1756 if resp != 0:
1757 if resp != 0:
1757 raise util.Abort(_('operation forbidden by server'))
1758 raise util.Abort(_('operation forbidden by server'))
1758 self.ui.status(_('streaming all changes\n'))
1759 self.ui.status(_('streaming all changes\n'))
1759 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1760 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1760 self.ui.status(_('%d files to transfer, %s of data\n') %
1761 self.ui.status(_('%d files to transfer, %s of data\n') %
1761 (total_files, util.bytecount(total_bytes)))
1762 (total_files, util.bytecount(total_bytes)))
1762 start = time.time()
1763 start = time.time()
1763 for i in xrange(total_files):
1764 for i in xrange(total_files):
1764 name, size = fp.readline().split('\0', 1)
1765 name, size = fp.readline().split('\0', 1)
1765 size = int(size)
1766 size = int(size)
1766 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1767 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1767 ofp = self.opener(name, 'w')
1768 ofp = self.opener(name, 'w')
1768 for chunk in util.filechunkiter(fp, limit=size):
1769 for chunk in util.filechunkiter(fp, limit=size):
1769 ofp.write(chunk)
1770 ofp.write(chunk)
1770 ofp.close()
1771 ofp.close()
1771 elapsed = time.time() - start
1772 elapsed = time.time() - start
1772 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1773 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1773 (util.bytecount(total_bytes), elapsed,
1774 (util.bytecount(total_bytes), elapsed,
1774 util.bytecount(total_bytes / elapsed)))
1775 util.bytecount(total_bytes / elapsed)))
1775 self.reload()
1776 self.reload()
1776 return len(self.heads()) + 1
1777 return len(self.heads()) + 1
1777
1778
1778 def clone(self, remote, heads=[], stream=False):
1779 def clone(self, remote, heads=[], stream=False):
1779 '''clone remote repository.
1780 '''clone remote repository.
1780
1781
1781 keyword arguments:
1782 keyword arguments:
1782 heads: list of revs to clone (forces use of pull)
1783 heads: list of revs to clone (forces use of pull)
1783 stream: use streaming clone if possible'''
1784 stream: use streaming clone if possible'''
1784
1785
1785 # now, all clients that can request uncompressed clones can
1786 # now, all clients that can request uncompressed clones can
1786 # read repo formats supported by all servers that can serve
1787 # read repo formats supported by all servers that can serve
1787 # them.
1788 # them.
1788
1789
1789 # if revlog format changes, client will have to check version
1790 # if revlog format changes, client will have to check version
1790 # and format flags on "stream" capability, and use
1791 # and format flags on "stream" capability, and use
1791 # uncompressed only if compatible.
1792 # uncompressed only if compatible.
1792
1793
1793 if stream and not heads and remote.capable('stream'):
1794 if stream and not heads and remote.capable('stream'):
1794 return self.stream_in(remote)
1795 return self.stream_in(remote)
1795 return self.pull(remote, heads)
1796 return self.pull(remote, heads)
1796
1797
1797 # used to avoid circular references so destructors work
1798 # used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the journal files under *base*
    to their undo counterparts after a transaction completes.

    A plain closure is returned (rather than a bound method) so the
    transaction does not hold a reference cycle back into the repo,
    keeping destructors working.
    """
    root = base
    def journal_to_undo():
        # rename order matters: the main journal first, then dirstate
        for old, new in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(root, old), os.path.join(root, new))
    return journal_to_undo
1805
1806
def instance(ui, path, create):
    """Repository-type factory hook: open (or create) a local repo.

    Strips an optional 'file:' scheme from *path* before handing the
    bare filesystem path to localrepository.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1808
1809
def islocal(path):
    """Repository-type hook: a localrepository path is always local,
    whatever *path* contains."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now