##// END OF EJS Templates
Split branchtags into two additional functions....
Alexis S. L. Carvalho -
r3491:23cffef5 default
parent child Browse files
Show More
@@ -1,1817 +1,1825
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or create) the repository rooted at path.

        parentui: ui object this repository's ui is derived from.
        path:     repository root directory; when None, walk upwards
                  from the current directory until ".hg" is found.
        create:   when true, initialize a new repository at path.

        Raises repo.RepoError if no repository is found (or one already
        exists when create is requested).
        """
        repo.repository.__init__(self)
        if not path:
            # search upwards for a directory containing ".hg"
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener/wopener open files relative to .hg and the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # per-repository configuration is optional
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # determine the revlog format version and flags for new revlogs
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not computed yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
90 def url(self):
90 def url(self):
91 return 'file:' + self.root
91 return 'file:' + self.root
92
92
    def hook(self, name, throw=False, **args):
        """Run every hook configured under [hooks] for 'name'.

        Each matching entry is either a python hook ("python:" prefix)
        or a shell command.  When throw is true, a failing hook raises
        util.Abort; otherwise only a warning is printed.  Returns the
        or-combined failure status of all hooks that ran.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            # the hook spec must be of the form "module.attr[.attr...]"
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the hook callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # user interrupts always propagate
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: hook arguments are exported as HG_* env vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # both "name" and "name.suffix" entries match; run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
173
173
174 tag_disallowed = ':\r\n'
174 tag_disallowed = ':\r\n'
175
175
176 def tag(self, name, node, message, local, user, date):
176 def tag(self, name, node, message, local, user, date):
177 '''tag a revision with a symbolic name.
177 '''tag a revision with a symbolic name.
178
178
179 if local is True, the tag is stored in a per-repository file.
179 if local is True, the tag is stored in a per-repository file.
180 otherwise, it is stored in the .hgtags file, and a new
180 otherwise, it is stored in the .hgtags file, and a new
181 changeset is committed with the change.
181 changeset is committed with the change.
182
182
183 keyword arguments:
183 keyword arguments:
184
184
185 local: whether to store tag in non-version-controlled file
185 local: whether to store tag in non-version-controlled file
186 (default False)
186 (default False)
187
187
188 message: commit message to use if committing
188 message: commit message to use if committing
189
189
190 user: name of user to use if committing
190 user: name of user to use if committing
191
191
192 date: date tuple to use if committing'''
192 date: date tuple to use if committing'''
193
193
194 for c in self.tag_disallowed:
194 for c in self.tag_disallowed:
195 if c in name:
195 if c in name:
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197
197
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199
199
200 if local:
200 if local:
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.hook('tag', node=hex(node), tag=name, local=local)
202 self.hook('tag', node=hex(node), tag=name, local=local)
203 return
203 return
204
204
205 for x in self.status()[:5]:
205 for x in self.status()[:5]:
206 if '.hgtags' in x:
206 if '.hgtags' in x:
207 raise util.Abort(_('working copy of .hgtags is changed '
207 raise util.Abort(_('working copy of .hgtags is changed '
208 '(please commit .hgtags manually)'))
208 '(please commit .hgtags manually)'))
209
209
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 if self.dirstate.state('.hgtags') == '?':
211 if self.dirstate.state('.hgtags') == '?':
212 self.add(['.hgtags'])
212 self.add(['.hgtags'])
213
213
214 self.commit(['.hgtags'], message, user, date)
214 self.commit(['.hgtags'], message, user, date)
215 self.hook('tag', node=hex(node), tag=name, local=local)
215 self.hook('tag', node=hex(node), tag=name, local=local)
216
216
217 def tags(self):
217 def tags(self):
218 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
219 if not self.tagscache:
219 if not self.tagscache:
220 self.tagscache = {}
220 self.tagscache = {}
221
221
222 def parsetag(line, context):
222 def parsetag(line, context):
223 if not line:
223 if not line:
224 return
224 return
225 s = l.split(" ", 1)
225 s = l.split(" ", 1)
226 if len(s) != 2:
226 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
228 return
229 node, key = s
229 node, key = s
230 key = key.strip()
230 key = key.strip()
231 try:
231 try:
232 bin_n = bin(node)
232 bin_n = bin(node)
233 except TypeError:
233 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
235 (context, node))
236 return
236 return
237 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
239 (context, key))
240 return
240 return
241 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
242
242
243 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
245 # taking precedence
245 # taking precedence
246 heads = self.heads()
246 heads = self.heads()
247 heads.reverse()
247 heads.reverse()
248 fl = self.file(".hgtags")
248 fl = self.file(".hgtags")
249 for node in heads:
249 for node in heads:
250 change = self.changelog.read(node)
250 change = self.changelog.read(node)
251 rev = self.changelog.rev(node)
251 rev = self.changelog.rev(node)
252 fn, ff = self.manifest.find(change[0], '.hgtags')
252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 if fn is None: continue
253 if fn is None: continue
254 count = 0
254 count = 0
255 for l in fl.read(fn).splitlines():
255 for l in fl.read(fn).splitlines():
256 count += 1
256 count += 1
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 (rev, short(node), count))
258 (rev, short(node), count))
259 try:
259 try:
260 f = self.opener("localtags")
260 f = self.opener("localtags")
261 count = 0
261 count = 0
262 for l in f:
262 for l in f:
263 count += 1
263 count += 1
264 parsetag(l, _("localtags, line %d") % count)
264 parsetag(l, _("localtags, line %d") % count)
265 except IOError:
265 except IOError:
266 pass
266 pass
267
267
268 self.tagscache['tip'] = self.changelog.tip()
268 self.tagscache['tip'] = self.changelog.tip()
269
269
270 return self.tagscache
270 return self.tagscache
271
271
272 def tagslist(self):
272 def tagslist(self):
273 '''return a list of tags ordered by revision'''
273 '''return a list of tags ordered by revision'''
274 l = []
274 l = []
275 for t, n in self.tags().items():
275 for t, n in self.tags().items():
276 try:
276 try:
277 r = self.changelog.rev(n)
277 r = self.changelog.rev(n)
278 except:
278 except:
279 r = -2 # sort to the beginning of the list if unknown
279 r = -2 # sort to the beginning of the list if unknown
280 l.append((r, t, n))
280 l.append((r, t, n))
281 l.sort()
281 l.sort()
282 return [(t, n) for r, t, n in l]
282 return [(t, n) for r, t, n in l]
283
283
284 def nodetags(self, node):
284 def nodetags(self, node):
285 '''return the tags associated with a node'''
285 '''return the tags associated with a node'''
286 if not self.nodetagscache:
286 if not self.nodetagscache:
287 self.nodetagscache = {}
287 self.nodetagscache = {}
288 for t, n in self.tags().items():
288 for t, n in self.tags().items():
289 self.nodetagscache.setdefault(n, []).append(t)
289 self.nodetagscache.setdefault(n, []).append(t)
290 return self.nodetagscache.get(node, [])
290 return self.nodetagscache.get(node, [])
291
291
292 def branchtags(self):
292 def branchtags(self):
293 if self.branchcache != None:
293 if self.branchcache != None:
294 return self.branchcache
294 return self.branchcache
295
295
296 self.branchcache = {} # avoid recursion in changectx
296 self.branchcache = {} # avoid recursion in changectx
297
297
298 partial, last, lrev = self._readbranchcache()
299
300 tiprev = self.changelog.count() - 1
301 if lrev != tiprev:
302 self._updatebranchcache(partial, lrev+1, tiprev+1)
303 self._writebranchcache(partial, self.changelog.tip(), tiprev)
304
305 self.branchcache = partial
306 return self.branchcache
307
308 def _readbranchcache(self):
309 partial = {}
298 try:
310 try:
299 f = self.opener("branches.cache")
311 f = self.opener("branches.cache")
300 last, lrev = f.readline().rstrip().split(" ", 1)
312 last, lrev = f.readline().rstrip().split(" ", 1)
301 last, lrev = bin(last), int(lrev)
313 last, lrev = bin(last), int(lrev)
302 if (lrev < self.changelog.count() and
314 if (lrev < self.changelog.count() and
303 self.changelog.node(lrev) == last): # sanity check
315 self.changelog.node(lrev) == last): # sanity check
304 for l in f:
316 for l in f:
305 node, label = l.rstrip().split(" ", 1)
317 node, label = l.rstrip().split(" ", 1)
306 self.branchcache[label] = bin(node)
318 partial[label] = bin(node)
307 else: # invalidate the cache
319 else: # invalidate the cache
308 last, lrev = nullid, -1
320 last, lrev = nullid, -1
309 f.close()
321 f.close()
310 except IOError:
322 except IOError:
311 last, lrev = nullid, -1
323 last, lrev = nullid, -1
324 return partial, last, lrev
312
325
313 tip = self.changelog.count() - 1
326 def _writebranchcache(self, branches, tip, tiprev):
314 if lrev != tip:
327 try:
315 for r in xrange(lrev + 1, tip + 1):
328 f = self.opener("branches.cache", "w")
329 f.write("%s %s\n" % (hex(tip), tiprev))
330 for label, node in branches.iteritems():
331 f.write("%s %s\n" % (hex(node), label))
332 except IOError:
333 pass
334
335 def _updatebranchcache(self, partial, start, end):
336 for r in xrange(start, end):
316 c = self.changectx(r)
337 c = self.changectx(r)
317 b = c.branch()
338 b = c.branch()
318 if b:
339 if b:
319 self.branchcache[b] = c.node()
340 partial[b] = c.node()
320 self._writebranchcache()
321
322 return self.branchcache
323
324 def _writebranchcache(self):
325 try:
326 f = self.opener("branches.cache", "w")
327 t = self.changelog.tip()
328 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
329 for label, node in self.branchcache.iteritems():
330 f.write("%s %s\n" % (hex(node), label))
331 except IOError:
332 pass
333
341
334 def lookup(self, key):
342 def lookup(self, key):
335 if key == '.':
343 if key == '.':
336 key = self.dirstate.parents()[0]
344 key = self.dirstate.parents()[0]
337 if key == nullid:
345 if key == nullid:
338 raise repo.RepoError(_("no revision checked out"))
346 raise repo.RepoError(_("no revision checked out"))
339 if key in self.tags():
347 if key in self.tags():
340 return self.tags()[key]
348 return self.tags()[key]
341 if key in self.branchtags():
349 if key in self.branchtags():
342 return self.branchtags()[key]
350 return self.branchtags()[key]
343 try:
351 try:
344 return self.changelog.lookup(key)
352 return self.changelog.lookup(key)
345 except:
353 except:
346 raise repo.RepoError(_("unknown revision '%s'") % key)
354 raise repo.RepoError(_("unknown revision '%s'") % key)
347
355
348 def dev(self):
356 def dev(self):
349 return os.lstat(self.path).st_dev
357 return os.lstat(self.path).st_dev
350
358
351 def local(self):
359 def local(self):
352 return True
360 return True
353
361
354 def join(self, f):
362 def join(self, f):
355 return os.path.join(self.path, f)
363 return os.path.join(self.path, f)
356
364
357 def wjoin(self, f):
365 def wjoin(self, f):
358 return os.path.join(self.root, f)
366 return os.path.join(self.root, f)
359
367
360 def file(self, f):
368 def file(self, f):
361 if f[0] == '/':
369 if f[0] == '/':
362 f = f[1:]
370 f = f[1:]
363 return filelog.filelog(self.opener, f, self.revlogversion)
371 return filelog.filelog(self.opener, f, self.revlogversion)
364
372
365 def changectx(self, changeid=None):
373 def changectx(self, changeid=None):
366 return context.changectx(self, changeid)
374 return context.changectx(self, changeid)
367
375
368 def workingctx(self):
376 def workingctx(self):
369 return context.workingctx(self)
377 return context.workingctx(self)
370
378
371 def parents(self, changeid=None):
379 def parents(self, changeid=None):
372 '''
380 '''
373 get list of changectxs for parents of changeid or working directory
381 get list of changectxs for parents of changeid or working directory
374 '''
382 '''
375 if changeid is None:
383 if changeid is None:
376 pl = self.dirstate.parents()
384 pl = self.dirstate.parents()
377 else:
385 else:
378 n = self.changelog.lookup(changeid)
386 n = self.changelog.lookup(changeid)
379 pl = self.changelog.parents(n)
387 pl = self.changelog.parents(n)
380 if pl[1] == nullid:
388 if pl[1] == nullid:
381 return [self.changectx(pl[0])]
389 return [self.changectx(pl[0])]
382 return [self.changectx(pl[0]), self.changectx(pl[1])]
390 return [self.changectx(pl[0]), self.changectx(pl[1])]
383
391
384 def filectx(self, path, changeid=None, fileid=None):
392 def filectx(self, path, changeid=None, fileid=None):
385 """changeid can be a changeset revision, node, or tag.
393 """changeid can be a changeset revision, node, or tag.
386 fileid can be a file revision or node."""
394 fileid can be a file revision or node."""
387 return context.filectx(self, path, changeid, fileid)
395 return context.filectx(self, path, changeid, fileid)
388
396
389 def getcwd(self):
397 def getcwd(self):
390 return self.dirstate.getcwd()
398 return self.dirstate.getcwd()
391
399
392 def wfile(self, f, mode='r'):
400 def wfile(self, f, mode='r'):
393 return self.wopener(f, mode)
401 return self.wopener(f, mode)
394
402
395 def wread(self, filename):
403 def wread(self, filename):
396 if self.encodepats == None:
404 if self.encodepats == None:
397 l = []
405 l = []
398 for pat, cmd in self.ui.configitems("encode"):
406 for pat, cmd in self.ui.configitems("encode"):
399 mf = util.matcher(self.root, "", [pat], [], [])[1]
407 mf = util.matcher(self.root, "", [pat], [], [])[1]
400 l.append((mf, cmd))
408 l.append((mf, cmd))
401 self.encodepats = l
409 self.encodepats = l
402
410
403 data = self.wopener(filename, 'r').read()
411 data = self.wopener(filename, 'r').read()
404
412
405 for mf, cmd in self.encodepats:
413 for mf, cmd in self.encodepats:
406 if mf(filename):
414 if mf(filename):
407 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
415 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
408 data = util.filter(data, cmd)
416 data = util.filter(data, cmd)
409 break
417 break
410
418
411 return data
419 return data
412
420
413 def wwrite(self, filename, data, fd=None):
421 def wwrite(self, filename, data, fd=None):
414 if self.decodepats == None:
422 if self.decodepats == None:
415 l = []
423 l = []
416 for pat, cmd in self.ui.configitems("decode"):
424 for pat, cmd in self.ui.configitems("decode"):
417 mf = util.matcher(self.root, "", [pat], [], [])[1]
425 mf = util.matcher(self.root, "", [pat], [], [])[1]
418 l.append((mf, cmd))
426 l.append((mf, cmd))
419 self.decodepats = l
427 self.decodepats = l
420
428
421 for mf, cmd in self.decodepats:
429 for mf, cmd in self.decodepats:
422 if mf(filename):
430 if mf(filename):
423 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
431 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
424 data = util.filter(data, cmd)
432 data = util.filter(data, cmd)
425 break
433 break
426
434
427 if fd:
435 if fd:
428 return fd.write(data)
436 return fd.write(data)
429 return self.wopener(filename, 'w').write(data)
437 return self.wopener(filename, 'w').write(data)
430
438
431 def transaction(self):
439 def transaction(self):
432 tr = self.transhandle
440 tr = self.transhandle
433 if tr != None and tr.running():
441 if tr != None and tr.running():
434 return tr.nest()
442 return tr.nest()
435
443
436 # save dirstate for rollback
444 # save dirstate for rollback
437 try:
445 try:
438 ds = self.opener("dirstate").read()
446 ds = self.opener("dirstate").read()
439 except IOError:
447 except IOError:
440 ds = ""
448 ds = ""
441 self.opener("journal.dirstate", "w").write(ds)
449 self.opener("journal.dirstate", "w").write(ds)
442
450
443 tr = transaction.transaction(self.ui.warn, self.opener,
451 tr = transaction.transaction(self.ui.warn, self.opener,
444 self.join("journal"),
452 self.join("journal"),
445 aftertrans(self.path))
453 aftertrans(self.path))
446 self.transhandle = tr
454 self.transhandle = tr
447 return tr
455 return tr
448
456
449 def recover(self):
457 def recover(self):
450 l = self.lock()
458 l = self.lock()
451 if os.path.exists(self.join("journal")):
459 if os.path.exists(self.join("journal")):
452 self.ui.status(_("rolling back interrupted transaction\n"))
460 self.ui.status(_("rolling back interrupted transaction\n"))
453 transaction.rollback(self.opener, self.join("journal"))
461 transaction.rollback(self.opener, self.join("journal"))
454 self.reload()
462 self.reload()
455 return True
463 return True
456 else:
464 else:
457 self.ui.warn(_("no interrupted transaction available\n"))
465 self.ui.warn(_("no interrupted transaction available\n"))
458 return False
466 return False
459
467
460 def rollback(self, wlock=None):
468 def rollback(self, wlock=None):
461 if not wlock:
469 if not wlock:
462 wlock = self.wlock()
470 wlock = self.wlock()
463 l = self.lock()
471 l = self.lock()
464 if os.path.exists(self.join("undo")):
472 if os.path.exists(self.join("undo")):
465 self.ui.status(_("rolling back last transaction\n"))
473 self.ui.status(_("rolling back last transaction\n"))
466 transaction.rollback(self.opener, self.join("undo"))
474 transaction.rollback(self.opener, self.join("undo"))
467 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
475 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
468 self.reload()
476 self.reload()
469 self.wreload()
477 self.wreload()
470 else:
478 else:
471 self.ui.warn(_("no rollback information available\n"))
479 self.ui.warn(_("no rollback information available\n"))
472
480
473 def wreload(self):
481 def wreload(self):
474 self.dirstate.read()
482 self.dirstate.read()
475
483
476 def reload(self):
484 def reload(self):
477 self.changelog.load()
485 self.changelog.load()
478 self.manifest.load()
486 self.manifest.load()
479 self.tagscache = None
487 self.tagscache = None
480 self.nodetagscache = None
488 self.nodetagscache = None
481
489
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file 'lockname'.

        When the lock is already held and wait is true, retry with a
        timeout (ui.timeout config value, default 600 seconds);
        otherwise the lock.LockHeld exception propagates to the caller.
        releasefn is run when the lock is released; acquirefn is run
        right after a successful acquisition.  Returns the lock object.
        """
        try:
            # first attempt is non-blocking (timeout 0)
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
498
506
499 def lock(self, wait=1):
507 def lock(self, wait=1):
500 return self.do_lock("lock", wait, acquirefn=self.reload,
508 return self.do_lock("lock", wait, acquirefn=self.reload,
501 desc=_('repository %s') % self.origroot)
509 desc=_('repository %s') % self.origroot)
502
510
503 def wlock(self, wait=1):
511 def wlock(self, wait=1):
504 return self.do_lock("wlock", wait, self.dirstate.write,
512 return self.do_lock("wlock", wait, self.dirstate.write,
505 self.wreload,
513 self.wreload,
506 desc=_('working directory of %s') % self.origroot)
514 desc=_('working directory of %s') % self.origroot)
507
515
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn:         name of the file in the working directory
        manifest1/2: manifests of the two commit parents
        linkrev:    changelog revision this file revision will link to
        transaction: the enclosing transaction object
        changelist: list that fn is appended to when a new file revision
                    is actually created

        Returns the node of the (new or reused) file revision.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)  # file's parent in manifest1
        fp2 = manifest2.get(fn, nullid)  # file's parent in manifest2

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # fn was copied/renamed from cp: record the copy source and
            # source revision in the filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # a copy gets a null first parent; ancestry flows via the
            # copy metadata instead
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
547
555
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Record a changeset containing exactly the given files against
        the given parents, bypassing the usual working-directory status
        checks.

        The dirstate is only updated when p1 is the current working
        directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []
        removed = []

        # only touch the dirstate when committing on top of the current
        # working directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                # file is gone from the working directory: treat it as a
                # removal instead
                try:
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                    removed.append(f)
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed + removed, text,
                               tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
590
598
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit changes and return the new changeset node, or None when
        there is nothing to commit or the commit message ends up empty.

        files        - explicit list of files to commit; when falsy,
                       commit whatever status() reports as changed
        text         - commit message; an editor is run when it is empty
                       or force_editor is true
        user, date   - committer identity and timestamp (defaulted)
        match        - filter used when files is not given
        force        - allow a commit even when nothing changed
        lock, wlock  - optionally pass in already-held locks
        """
        commit = []
        remove = []
        changed = []

        # split the requested files into "to commit" and "to remove"
        # based on their dirstate state
        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.status(match=match)[:5]
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        branchname = self.workingctx().branch()
        oldname = c1[5].get("branch", "")

        # an empty commit is still allowed when forced, when completing
        # a merge (p2 set), or when the branch name changed
        if not commit and not remove and not force and p2 == nullid and \
           branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the skeleton message presented in the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        extra = {}
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # move the dirstate onto the new changeset
        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
702
710
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (source, filename) pairs for files of interest.

        With a node, walk the manifest of that revision (source 'm'),
        warning about requested files missing from it unless badmatch
        accepts them (source 'b').  Without a node, defer to the
        dirstate walk of the working directory.

        NOTE(review): files=[] is a shared mutable default; it is only
        read here (copied into fdict), never mutated, so it is safe as
        written.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # deleting while iterating is tolerated because
                        # we break out of the loop immediately after
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but never seen in the
            # manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
724
732
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        where ignored and clean are only populated when list_ignored /
        list_clean are true.
        """

        def fcmp(fn, mf):
            # full content comparison of working file fn against the
            # revision recorded in manifest mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # try for the wlock so clean-file fixups can be written back
            # to the dirstate; proceed read-only if it is unavailable
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean so the next
                                # status can skip the full compare
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # mf2[fn] == "" marks a working-dir pseudo-entry;
                    # fall back to a content compare in that case
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
821
829
822 def add(self, list, wlock=None):
830 def add(self, list, wlock=None):
823 if not wlock:
831 if not wlock:
824 wlock = self.wlock()
832 wlock = self.wlock()
825 for f in list:
833 for f in list:
826 p = self.wjoin(f)
834 p = self.wjoin(f)
827 if not os.path.exists(p):
835 if not os.path.exists(p):
828 self.ui.warn(_("%s does not exist!\n") % f)
836 self.ui.warn(_("%s does not exist!\n") % f)
829 elif not os.path.isfile(p):
837 elif not os.path.isfile(p):
830 self.ui.warn(_("%s not added: only files supported currently\n")
838 self.ui.warn(_("%s not added: only files supported currently\n")
831 % f)
839 % f)
832 elif self.dirstate.state(f) in 'an':
840 elif self.dirstate.state(f) in 'an':
833 self.ui.warn(_("%s already tracked!\n") % f)
841 self.ui.warn(_("%s already tracked!\n") % f)
834 else:
842 else:
835 self.dirstate.update([f], "a")
843 self.dirstate.update([f], "a")
836
844
837 def forget(self, list, wlock=None):
845 def forget(self, list, wlock=None):
838 if not wlock:
846 if not wlock:
839 wlock = self.wlock()
847 wlock = self.wlock()
840 for f in list:
848 for f in list:
841 if self.dirstate.state(f) not in 'ai':
849 if self.dirstate.state(f) not in 'ai':
842 self.ui.warn(_("%s not added!\n") % f)
850 self.ui.warn(_("%s not added!\n") % f)
843 else:
851 else:
844 self.dirstate.forget([f])
852 self.dirstate.forget([f])
845
853
    def remove(self, list, unlink=False, wlock=None):
        """Mark the given files as removed in the dirstate.

        When unlink is true the files are first deleted from the working
        directory (files already missing are ignored).
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file that is already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # refuse to mark a still-present file as removed
                # (callers pass unlink=True to delete it first)
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
866
874
867 def undelete(self, list, wlock=None):
875 def undelete(self, list, wlock=None):
868 p = self.dirstate.parents()[0]
876 p = self.dirstate.parents()[0]
869 mn = self.changelog.read(p)[0]
877 mn = self.changelog.read(p)[0]
870 m = self.manifest.read(mn)
878 m = self.manifest.read(mn)
871 if not wlock:
879 if not wlock:
872 wlock = self.wlock()
880 wlock = self.wlock()
873 for f in list:
881 for f in list:
874 if self.dirstate.state(f) not in "r":
882 if self.dirstate.state(f) not in "r":
875 self.ui.warn("%s not removed!\n" % f)
883 self.ui.warn("%s not removed!\n" % f)
876 else:
884 else:
877 t = self.file(f).read(m[f])
885 t = self.file(f).read(m[f])
878 self.wwrite(f, t)
886 self.wwrite(f, t)
879 util.set_exec(self.wjoin(f), m.execf(f))
887 util.set_exec(self.wjoin(f), m.execf(f))
880 self.dirstate.update([f], "n")
888 self.dirstate.update([f], "n")
881
889
882 def copy(self, source, dest, wlock=None):
890 def copy(self, source, dest, wlock=None):
883 p = self.wjoin(dest)
891 p = self.wjoin(dest)
884 if not os.path.exists(p):
892 if not os.path.exists(p):
885 self.ui.warn(_("%s does not exist!\n") % dest)
893 self.ui.warn(_("%s does not exist!\n") % dest)
886 elif not os.path.isfile(p):
894 elif not os.path.isfile(p):
887 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
895 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
888 else:
896 else:
889 if not wlock:
897 if not wlock:
890 wlock = self.wlock()
898 wlock = self.wlock()
891 if self.dirstate.state(dest) == '?':
899 if self.dirstate.state(dest) == '?':
892 self.dirstate.update([dest], "a")
900 self.dirstate.update([dest], "a")
893 self.dirstate.copy(source, dest)
901 self.dirstate.copy(source, dest)
894
902
895 def heads(self, start=None):
903 def heads(self, start=None):
896 heads = self.changelog.heads(start)
904 heads = self.changelog.heads(start)
897 # sort the output in rev descending order
905 # sort the output in rev descending order
898 heads = [(-self.changelog.rev(h), h) for h in heads]
906 heads = [(-self.changelog.rev(h), h) for h in heads]
899 heads.sort()
907 heads.sort()
900 return [n for (r, n) in heads]
908 return [n for (r, n) in heads]
901
909
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # node -> {tagged node: 1} visibility map
        merges = []         # (second parent, found-so-far) still to walk
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a walk queued at a merge's second parent,
                # carrying over the tags found on the way here
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    # record the first non-'tip' tag as visible from
                    # everything collected so far
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is hit
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the merge's other parent for a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of the branches map from node,
                # memoized in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    # b is not shadowed by any other tag on this head
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1007
1015
1008 def branches(self, nodes):
1016 def branches(self, nodes):
1009 if not nodes:
1017 if not nodes:
1010 nodes = [self.changelog.tip()]
1018 nodes = [self.changelog.tip()]
1011 b = []
1019 b = []
1012 for n in nodes:
1020 for n in nodes:
1013 t = n
1021 t = n
1014 while 1:
1022 while 1:
1015 p = self.changelog.parents(n)
1023 p = self.changelog.parents(n)
1016 if p[1] != nullid or p[0] == nullid:
1024 if p[1] != nullid or p[0] == nullid:
1017 b.append((t, n, p[0], p[1]))
1025 b.append((t, n, p[0], p[1]))
1018 break
1026 break
1019 n = p[0]
1027 n = p[0]
1020 return b
1028 return b
1021
1029
1022 def between(self, pairs):
1030 def between(self, pairs):
1023 r = []
1031 r = []
1024
1032
1025 for top, bottom in pairs:
1033 for top, bottom in pairs:
1026 n, l, i = top, [], 0
1034 n, l, i = top, [], 0
1027 f = 1
1035 f = 1
1028
1036
1029 while n != bottom:
1037 while n != bottom:
1030 p = self.changelog.parents(n)[0]
1038 p = self.changelog.parents(n)[0]
1031 if i == f:
1039 if i == f:
1032 l.append(n)
1040 l.append(n)
1033 f = f * 2
1041 f = f * 2
1034 n = p
1042 n = p
1035 i += 1
1043 i += 1
1036
1044
1037 r.append(l)
1045 r.append(l)
1038
1046
1039 return r
1047 return r
1040
1048
1041 def findincoming(self, remote, base=None, heads=None, force=False):
1049 def findincoming(self, remote, base=None, heads=None, force=False):
1042 """Return list of roots of the subsets of missing nodes from remote
1050 """Return list of roots of the subsets of missing nodes from remote
1043
1051
1044 If base dict is specified, assume that these nodes and their parents
1052 If base dict is specified, assume that these nodes and their parents
1045 exist on the remote side and that no child of a node of base exists
1053 exist on the remote side and that no child of a node of base exists
1046 in both remote and self.
1054 in both remote and self.
1047 Furthermore base will be updated to include the nodes that exists
1055 Furthermore base will be updated to include the nodes that exists
1048 in self and remote but no children exists in self and remote.
1056 in self and remote but no children exists in self and remote.
1049 If a list of heads is specified, return only nodes which are heads
1057 If a list of heads is specified, return only nodes which are heads
1050 or ancestors of these heads.
1058 or ancestors of these heads.
1051
1059
1052 All the ancestors of base are in self and in remote.
1060 All the ancestors of base are in self and in remote.
1053 All the descendants of the list returned are missing in self.
1061 All the descendants of the list returned are missing in self.
1054 (and so we know that the rest of the nodes are missing in remote, see
1062 (and so we know that the rest of the nodes are missing in remote, see
1055 outgoing)
1063 outgoing)
1056 """
1064 """
1057 m = self.changelog.nodemap
1065 m = self.changelog.nodemap
1058 search = []
1066 search = []
1059 fetch = {}
1067 fetch = {}
1060 seen = {}
1068 seen = {}
1061 seenbranch = {}
1069 seenbranch = {}
1062 if base == None:
1070 if base == None:
1063 base = {}
1071 base = {}
1064
1072
1065 if not heads:
1073 if not heads:
1066 heads = remote.heads()
1074 heads = remote.heads()
1067
1075
1068 if self.changelog.tip() == nullid:
1076 if self.changelog.tip() == nullid:
1069 base[nullid] = 1
1077 base[nullid] = 1
1070 if heads != [nullid]:
1078 if heads != [nullid]:
1071 return [nullid]
1079 return [nullid]
1072 return []
1080 return []
1073
1081
1074 # assume we're closer to the tip than the root
1082 # assume we're closer to the tip than the root
1075 # and start by examining the heads
1083 # and start by examining the heads
1076 self.ui.status(_("searching for changes\n"))
1084 self.ui.status(_("searching for changes\n"))
1077
1085
1078 unknown = []
1086 unknown = []
1079 for h in heads:
1087 for h in heads:
1080 if h not in m:
1088 if h not in m:
1081 unknown.append(h)
1089 unknown.append(h)
1082 else:
1090 else:
1083 base[h] = 1
1091 base[h] = 1
1084
1092
1085 if not unknown:
1093 if not unknown:
1086 return []
1094 return []
1087
1095
1088 req = dict.fromkeys(unknown)
1096 req = dict.fromkeys(unknown)
1089 reqcnt = 0
1097 reqcnt = 0
1090
1098
1091 # search through remote branches
1099 # search through remote branches
1092 # a 'branch' here is a linear segment of history, with four parts:
1100 # a 'branch' here is a linear segment of history, with four parts:
1093 # head, root, first parent, second parent
1101 # head, root, first parent, second parent
1094 # (a branch always has two parents (or none) by definition)
1102 # (a branch always has two parents (or none) by definition)
1095 unknown = remote.branches(unknown)
1103 unknown = remote.branches(unknown)
1096 while unknown:
1104 while unknown:
1097 r = []
1105 r = []
1098 while unknown:
1106 while unknown:
1099 n = unknown.pop(0)
1107 n = unknown.pop(0)
1100 if n[0] in seen:
1108 if n[0] in seen:
1101 continue
1109 continue
1102
1110
1103 self.ui.debug(_("examining %s:%s\n")
1111 self.ui.debug(_("examining %s:%s\n")
1104 % (short(n[0]), short(n[1])))
1112 % (short(n[0]), short(n[1])))
1105 if n[0] == nullid: # found the end of the branch
1113 if n[0] == nullid: # found the end of the branch
1106 pass
1114 pass
1107 elif n in seenbranch:
1115 elif n in seenbranch:
1108 self.ui.debug(_("branch already found\n"))
1116 self.ui.debug(_("branch already found\n"))
1109 continue
1117 continue
1110 elif n[1] and n[1] in m: # do we know the base?
1118 elif n[1] and n[1] in m: # do we know the base?
1111 self.ui.debug(_("found incomplete branch %s:%s\n")
1119 self.ui.debug(_("found incomplete branch %s:%s\n")
1112 % (short(n[0]), short(n[1])))
1120 % (short(n[0]), short(n[1])))
1113 search.append(n) # schedule branch range for scanning
1121 search.append(n) # schedule branch range for scanning
1114 seenbranch[n] = 1
1122 seenbranch[n] = 1
1115 else:
1123 else:
1116 if n[1] not in seen and n[1] not in fetch:
1124 if n[1] not in seen and n[1] not in fetch:
1117 if n[2] in m and n[3] in m:
1125 if n[2] in m and n[3] in m:
1118 self.ui.debug(_("found new changeset %s\n") %
1126 self.ui.debug(_("found new changeset %s\n") %
1119 short(n[1]))
1127 short(n[1]))
1120 fetch[n[1]] = 1 # earliest unknown
1128 fetch[n[1]] = 1 # earliest unknown
1121 for p in n[2:4]:
1129 for p in n[2:4]:
1122 if p in m:
1130 if p in m:
1123 base[p] = 1 # latest known
1131 base[p] = 1 # latest known
1124
1132
1125 for p in n[2:4]:
1133 for p in n[2:4]:
1126 if p not in req and p not in m:
1134 if p not in req and p not in m:
1127 r.append(p)
1135 r.append(p)
1128 req[p] = 1
1136 req[p] = 1
1129 seen[n[0]] = 1
1137 seen[n[0]] = 1
1130
1138
1131 if r:
1139 if r:
1132 reqcnt += 1
1140 reqcnt += 1
1133 self.ui.debug(_("request %d: %s\n") %
1141 self.ui.debug(_("request %d: %s\n") %
1134 (reqcnt, " ".join(map(short, r))))
1142 (reqcnt, " ".join(map(short, r))))
1135 for p in xrange(0, len(r), 10):
1143 for p in xrange(0, len(r), 10):
1136 for b in remote.branches(r[p:p+10]):
1144 for b in remote.branches(r[p:p+10]):
1137 self.ui.debug(_("received %s:%s\n") %
1145 self.ui.debug(_("received %s:%s\n") %
1138 (short(b[0]), short(b[1])))
1146 (short(b[0]), short(b[1])))
1139 unknown.append(b)
1147 unknown.append(b)
1140
1148
1141 # do binary search on the branches we found
1149 # do binary search on the branches we found
1142 while search:
1150 while search:
1143 n = search.pop(0)
1151 n = search.pop(0)
1144 reqcnt += 1
1152 reqcnt += 1
1145 l = remote.between([(n[0], n[1])])[0]
1153 l = remote.between([(n[0], n[1])])[0]
1146 l.append(n[1])
1154 l.append(n[1])
1147 p = n[0]
1155 p = n[0]
1148 f = 1
1156 f = 1
1149 for i in l:
1157 for i in l:
1150 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1158 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1151 if i in m:
1159 if i in m:
1152 if f <= 2:
1160 if f <= 2:
1153 self.ui.debug(_("found new branch changeset %s\n") %
1161 self.ui.debug(_("found new branch changeset %s\n") %
1154 short(p))
1162 short(p))
1155 fetch[p] = 1
1163 fetch[p] = 1
1156 base[i] = 1
1164 base[i] = 1
1157 else:
1165 else:
1158 self.ui.debug(_("narrowed branch search to %s:%s\n")
1166 self.ui.debug(_("narrowed branch search to %s:%s\n")
1159 % (short(p), short(i)))
1167 % (short(p), short(i)))
1160 search.append((p, i))
1168 search.append((p, i))
1161 break
1169 break
1162 p, f = i, f * 2
1170 p, f = i, f * 2
1163
1171
1164 # sanity check our fetch list
1172 # sanity check our fetch list
1165 for f in fetch.keys():
1173 for f in fetch.keys():
1166 if f in m:
1174 if f in m:
1167 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1175 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1168
1176
1169 if base.keys() == [nullid]:
1177 if base.keys() == [nullid]:
1170 if force:
1178 if force:
1171 self.ui.warn(_("warning: repository is unrelated\n"))
1179 self.ui.warn(_("warning: repository is unrelated\n"))
1172 else:
1180 else:
1173 raise util.Abort(_("repository is unrelated"))
1181 raise util.Abort(_("repository is unrelated"))
1174
1182
1175 self.ui.debug(_("found new changesets starting at ") +
1183 self.ui.debug(_("found new changesets starting at ") +
1176 " ".join([short(f) for f in fetch]) + "\n")
1184 " ".join([short(f) for f in fetch]) + "\n")
1177
1185
1178 self.ui.debug(_("%d total queries\n") % reqcnt)
1186 self.ui.debug(_("%d total queries\n") % reqcnt)
1179
1187
1180 return fetch.keys()
1188 return fetch.keys()
1181
1189
1182 def findoutgoing(self, remote, base=None, heads=None, force=False):
1190 def findoutgoing(self, remote, base=None, heads=None, force=False):
1183 """Return list of nodes that are roots of subsets not in remote
1191 """Return list of nodes that are roots of subsets not in remote
1184
1192
1185 If base dict is specified, assume that these nodes and their parents
1193 If base dict is specified, assume that these nodes and their parents
1186 exist on the remote side.
1194 exist on the remote side.
1187 If a list of heads is specified, return only nodes which are heads
1195 If a list of heads is specified, return only nodes which are heads
1188 or ancestors of these heads, and return a second element which
1196 or ancestors of these heads, and return a second element which
1189 contains all remote heads which get new children.
1197 contains all remote heads which get new children.
1190 """
1198 """
1191 if base == None:
1199 if base == None:
1192 base = {}
1200 base = {}
1193 self.findincoming(remote, base, heads, force=force)
1201 self.findincoming(remote, base, heads, force=force)
1194
1202
1195 self.ui.debug(_("common changesets up to ")
1203 self.ui.debug(_("common changesets up to ")
1196 + " ".join(map(short, base.keys())) + "\n")
1204 + " ".join(map(short, base.keys())) + "\n")
1197
1205
1198 remain = dict.fromkeys(self.changelog.nodemap)
1206 remain = dict.fromkeys(self.changelog.nodemap)
1199
1207
1200 # prune everything remote has from the tree
1208 # prune everything remote has from the tree
1201 del remain[nullid]
1209 del remain[nullid]
1202 remove = base.keys()
1210 remove = base.keys()
1203 while remove:
1211 while remove:
1204 n = remove.pop(0)
1212 n = remove.pop(0)
1205 if n in remain:
1213 if n in remain:
1206 del remain[n]
1214 del remain[n]
1207 for p in self.changelog.parents(n):
1215 for p in self.changelog.parents(n):
1208 remove.append(p)
1216 remove.append(p)
1209
1217
1210 # find every node whose parents have been pruned
1218 # find every node whose parents have been pruned
1211 subset = []
1219 subset = []
1212 # find every remote head that will get new children
1220 # find every remote head that will get new children
1213 updated_heads = {}
1221 updated_heads = {}
1214 for n in remain:
1222 for n in remain:
1215 p1, p2 = self.changelog.parents(n)
1223 p1, p2 = self.changelog.parents(n)
1216 if p1 not in remain and p2 not in remain:
1224 if p1 not in remain and p2 not in remain:
1217 subset.append(n)
1225 subset.append(n)
1218 if heads:
1226 if heads:
1219 if p1 in heads:
1227 if p1 in heads:
1220 updated_heads[p1] = True
1228 updated_heads[p1] = True
1221 if p2 in heads:
1229 if p2 in heads:
1222 updated_heads[p2] = True
1230 updated_heads[p2] = True
1223
1231
1224 # this is the set of all roots we have to push
1232 # this is the set of all roots we have to push
1225 if heads:
1233 if heads:
1226 return subset, updated_heads.keys()
1234 return subset, updated_heads.keys()
1227 else:
1235 else:
1228 return subset
1236 return subset
1229
1237
1230 def pull(self, remote, heads=None, force=False, lock=None):
1238 def pull(self, remote, heads=None, force=False, lock=None):
1231 mylock = False
1239 mylock = False
1232 if not lock:
1240 if not lock:
1233 lock = self.lock()
1241 lock = self.lock()
1234 mylock = True
1242 mylock = True
1235
1243
1236 try:
1244 try:
1237 fetch = self.findincoming(remote, force=force)
1245 fetch = self.findincoming(remote, force=force)
1238 if fetch == [nullid]:
1246 if fetch == [nullid]:
1239 self.ui.status(_("requesting all changes\n"))
1247 self.ui.status(_("requesting all changes\n"))
1240
1248
1241 if not fetch:
1249 if not fetch:
1242 self.ui.status(_("no changes found\n"))
1250 self.ui.status(_("no changes found\n"))
1243 return 0
1251 return 0
1244
1252
1245 if heads is None:
1253 if heads is None:
1246 cg = remote.changegroup(fetch, 'pull')
1254 cg = remote.changegroup(fetch, 'pull')
1247 else:
1255 else:
1248 if 'changegroupsubset' not in remote.capabilities:
1256 if 'changegroupsubset' not in remote.capabilities:
1249 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1257 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1250 cg = remote.changegroupsubset(fetch, heads, 'pull')
1258 cg = remote.changegroupsubset(fetch, heads, 'pull')
1251 return self.addchangegroup(cg, 'pull', remote.url())
1259 return self.addchangegroup(cg, 'pull', remote.url())
1252 finally:
1260 finally:
1253 if mylock:
1261 if mylock:
1254 lock.release()
1262 lock.release()
1255
1263
1256 def push(self, remote, force=False, revs=None):
1264 def push(self, remote, force=False, revs=None):
1257 # there are two ways to push to remote repo:
1265 # there are two ways to push to remote repo:
1258 #
1266 #
1259 # addchangegroup assumes local user can lock remote
1267 # addchangegroup assumes local user can lock remote
1260 # repo (local filesystem, old ssh servers).
1268 # repo (local filesystem, old ssh servers).
1261 #
1269 #
1262 # unbundle assumes local user cannot lock remote repo (new ssh
1270 # unbundle assumes local user cannot lock remote repo (new ssh
1263 # servers, http servers).
1271 # servers, http servers).
1264
1272
1265 if remote.capable('unbundle'):
1273 if remote.capable('unbundle'):
1266 return self.push_unbundle(remote, force, revs)
1274 return self.push_unbundle(remote, force, revs)
1267 return self.push_addchangegroup(remote, force, revs)
1275 return self.push_addchangegroup(remote, force, revs)
1268
1276
1269 def prepush(self, remote, force, revs):
1277 def prepush(self, remote, force, revs):
1270 base = {}
1278 base = {}
1271 remote_heads = remote.heads()
1279 remote_heads = remote.heads()
1272 inc = self.findincoming(remote, base, remote_heads, force=force)
1280 inc = self.findincoming(remote, base, remote_heads, force=force)
1273 if not force and inc:
1281 if not force and inc:
1274 self.ui.warn(_("abort: unsynced remote changes!\n"))
1282 self.ui.warn(_("abort: unsynced remote changes!\n"))
1275 self.ui.status(_("(did you forget to sync?"
1283 self.ui.status(_("(did you forget to sync?"
1276 " use push -f to force)\n"))
1284 " use push -f to force)\n"))
1277 return None, 1
1285 return None, 1
1278
1286
1279 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1287 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1280 if revs is not None:
1288 if revs is not None:
1281 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1289 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1282 else:
1290 else:
1283 bases, heads = update, self.changelog.heads()
1291 bases, heads = update, self.changelog.heads()
1284
1292
1285 if not bases:
1293 if not bases:
1286 self.ui.status(_("no changes found\n"))
1294 self.ui.status(_("no changes found\n"))
1287 return None, 1
1295 return None, 1
1288 elif not force:
1296 elif not force:
1289 # FIXME we don't properly detect creation of new heads
1297 # FIXME we don't properly detect creation of new heads
1290 # in the push -r case, assume the user knows what he's doing
1298 # in the push -r case, assume the user knows what he's doing
1291 if not revs and len(remote_heads) < len(heads) \
1299 if not revs and len(remote_heads) < len(heads) \
1292 and remote_heads != [nullid]:
1300 and remote_heads != [nullid]:
1293 self.ui.warn(_("abort: push creates new remote branches!\n"))
1301 self.ui.warn(_("abort: push creates new remote branches!\n"))
1294 self.ui.status(_("(did you forget to merge?"
1302 self.ui.status(_("(did you forget to merge?"
1295 " use push -f to force)\n"))
1303 " use push -f to force)\n"))
1296 return None, 1
1304 return None, 1
1297
1305
1298 if revs is None:
1306 if revs is None:
1299 cg = self.changegroup(update, 'push')
1307 cg = self.changegroup(update, 'push')
1300 else:
1308 else:
1301 cg = self.changegroupsubset(update, revs, 'push')
1309 cg = self.changegroupsubset(update, revs, 'push')
1302 return cg, remote_heads
1310 return cg, remote_heads
1303
1311
1304 def push_addchangegroup(self, remote, force, revs):
1312 def push_addchangegroup(self, remote, force, revs):
1305 lock = remote.lock()
1313 lock = remote.lock()
1306
1314
1307 ret = self.prepush(remote, force, revs)
1315 ret = self.prepush(remote, force, revs)
1308 if ret[0] is not None:
1316 if ret[0] is not None:
1309 cg, remote_heads = ret
1317 cg, remote_heads = ret
1310 return remote.addchangegroup(cg, 'push', self.url())
1318 return remote.addchangegroup(cg, 'push', self.url())
1311 return ret[1]
1319 return ret[1]
1312
1320
1313 def push_unbundle(self, remote, force, revs):
1321 def push_unbundle(self, remote, force, revs):
1314 # local repo finds heads on server, finds out what revs it
1322 # local repo finds heads on server, finds out what revs it
1315 # must push. once revs transferred, if server finds it has
1323 # must push. once revs transferred, if server finds it has
1316 # different heads (someone else won commit/push race), server
1324 # different heads (someone else won commit/push race), server
1317 # aborts.
1325 # aborts.
1318
1326
1319 ret = self.prepush(remote, force, revs)
1327 ret = self.prepush(remote, force, revs)
1320 if ret[0] is not None:
1328 if ret[0] is not None:
1321 cg, remote_heads = ret
1329 cg, remote_heads = ret
1322 if force: remote_heads = ['force']
1330 if force: remote_heads = ['force']
1323 return remote.unbundle(cg, remote_heads, 'push')
1331 return remote.unbundle(cg, remote_heads, 'push')
1324 return ret[1]
1332 return ret[1]
1325
1333
1326 def changegroupsubset(self, bases, heads, source):
1334 def changegroupsubset(self, bases, heads, source):
1327 """This function generates a changegroup consisting of all the nodes
1335 """This function generates a changegroup consisting of all the nodes
1328 that are descendents of any of the bases, and ancestors of any of
1336 that are descendents of any of the bases, and ancestors of any of
1329 the heads.
1337 the heads.
1330
1338
1331 It is fairly complex as determining which filenodes and which
1339 It is fairly complex as determining which filenodes and which
1332 manifest nodes need to be included for the changeset to be complete
1340 manifest nodes need to be included for the changeset to be complete
1333 is non-trivial.
1341 is non-trivial.
1334
1342
1335 Another wrinkle is doing the reverse, figuring out which changeset in
1343 Another wrinkle is doing the reverse, figuring out which changeset in
1336 the changegroup a particular filenode or manifestnode belongs to."""
1344 the changegroup a particular filenode or manifestnode belongs to."""
1337
1345
1338 self.hook('preoutgoing', throw=True, source=source)
1346 self.hook('preoutgoing', throw=True, source=source)
1339
1347
1340 # Set up some initial variables
1348 # Set up some initial variables
1341 # Make it easy to refer to self.changelog
1349 # Make it easy to refer to self.changelog
1342 cl = self.changelog
1350 cl = self.changelog
1343 # msng is short for missing - compute the list of changesets in this
1351 # msng is short for missing - compute the list of changesets in this
1344 # changegroup.
1352 # changegroup.
1345 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1353 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1346 # Some bases may turn out to be superfluous, and some heads may be
1354 # Some bases may turn out to be superfluous, and some heads may be
1347 # too. nodesbetween will return the minimal set of bases and heads
1355 # too. nodesbetween will return the minimal set of bases and heads
1348 # necessary to re-create the changegroup.
1356 # necessary to re-create the changegroup.
1349
1357
1350 # Known heads are the list of heads that it is assumed the recipient
1358 # Known heads are the list of heads that it is assumed the recipient
1351 # of this changegroup will know about.
1359 # of this changegroup will know about.
1352 knownheads = {}
1360 knownheads = {}
1353 # We assume that all parents of bases are known heads.
1361 # We assume that all parents of bases are known heads.
1354 for n in bases:
1362 for n in bases:
1355 for p in cl.parents(n):
1363 for p in cl.parents(n):
1356 if p != nullid:
1364 if p != nullid:
1357 knownheads[p] = 1
1365 knownheads[p] = 1
1358 knownheads = knownheads.keys()
1366 knownheads = knownheads.keys()
1359 if knownheads:
1367 if knownheads:
1360 # Now that we know what heads are known, we can compute which
1368 # Now that we know what heads are known, we can compute which
1361 # changesets are known. The recipient must know about all
1369 # changesets are known. The recipient must know about all
1362 # changesets required to reach the known heads from the null
1370 # changesets required to reach the known heads from the null
1363 # changeset.
1371 # changeset.
1364 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1372 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1365 junk = None
1373 junk = None
1366 # Transform the list into an ersatz set.
1374 # Transform the list into an ersatz set.
1367 has_cl_set = dict.fromkeys(has_cl_set)
1375 has_cl_set = dict.fromkeys(has_cl_set)
1368 else:
1376 else:
1369 # If there were no known heads, the recipient cannot be assumed to
1377 # If there were no known heads, the recipient cannot be assumed to
1370 # know about any changesets.
1378 # know about any changesets.
1371 has_cl_set = {}
1379 has_cl_set = {}
1372
1380
1373 # Make it easy to refer to self.manifest
1381 # Make it easy to refer to self.manifest
1374 mnfst = self.manifest
1382 mnfst = self.manifest
1375 # We don't know which manifests are missing yet
1383 # We don't know which manifests are missing yet
1376 msng_mnfst_set = {}
1384 msng_mnfst_set = {}
1377 # Nor do we know which filenodes are missing.
1385 # Nor do we know which filenodes are missing.
1378 msng_filenode_set = {}
1386 msng_filenode_set = {}
1379
1387
1380 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1388 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1381 junk = None
1389 junk = None
1382
1390
1383 # A changeset always belongs to itself, so the changenode lookup
1391 # A changeset always belongs to itself, so the changenode lookup
1384 # function for a changenode is identity.
1392 # function for a changenode is identity.
1385 def identity(x):
1393 def identity(x):
1386 return x
1394 return x
1387
1395
1388 # A function generating function. Sets up an environment for the
1396 # A function generating function. Sets up an environment for the
1389 # inner function.
1397 # inner function.
1390 def cmp_by_rev_func(revlog):
1398 def cmp_by_rev_func(revlog):
1391 # Compare two nodes by their revision number in the environment's
1399 # Compare two nodes by their revision number in the environment's
1392 # revision history. Since the revision number both represents the
1400 # revision history. Since the revision number both represents the
1393 # most efficient order to read the nodes in, and represents a
1401 # most efficient order to read the nodes in, and represents a
1394 # topological sorting of the nodes, this function is often useful.
1402 # topological sorting of the nodes, this function is often useful.
1395 def cmp_by_rev(a, b):
1403 def cmp_by_rev(a, b):
1396 return cmp(revlog.rev(a), revlog.rev(b))
1404 return cmp(revlog.rev(a), revlog.rev(b))
1397 return cmp_by_rev
1405 return cmp_by_rev
1398
1406
1399 # If we determine that a particular file or manifest node must be a
1407 # If we determine that a particular file or manifest node must be a
1400 # node that the recipient of the changegroup will already have, we can
1408 # node that the recipient of the changegroup will already have, we can
1401 # also assume the recipient will have all the parents. This function
1409 # also assume the recipient will have all the parents. This function
1402 # prunes them from the set of missing nodes.
1410 # prunes them from the set of missing nodes.
1403 def prune_parents(revlog, hasset, msngset):
1411 def prune_parents(revlog, hasset, msngset):
1404 haslst = hasset.keys()
1412 haslst = hasset.keys()
1405 haslst.sort(cmp_by_rev_func(revlog))
1413 haslst.sort(cmp_by_rev_func(revlog))
1406 for node in haslst:
1414 for node in haslst:
1407 parentlst = [p for p in revlog.parents(node) if p != nullid]
1415 parentlst = [p for p in revlog.parents(node) if p != nullid]
1408 while parentlst:
1416 while parentlst:
1409 n = parentlst.pop()
1417 n = parentlst.pop()
1410 if n not in hasset:
1418 if n not in hasset:
1411 hasset[n] = 1
1419 hasset[n] = 1
1412 p = [p for p in revlog.parents(n) if p != nullid]
1420 p = [p for p in revlog.parents(n) if p != nullid]
1413 parentlst.extend(p)
1421 parentlst.extend(p)
1414 for n in hasset:
1422 for n in hasset:
1415 msngset.pop(n, None)
1423 msngset.pop(n, None)
1416
1424
1417 # This is a function generating function used to set up an environment
1425 # This is a function generating function used to set up an environment
1418 # for the inner function to execute in.
1426 # for the inner function to execute in.
1419 def manifest_and_file_collector(changedfileset):
1427 def manifest_and_file_collector(changedfileset):
1420 # This is an information gathering function that gathers
1428 # This is an information gathering function that gathers
1421 # information from each changeset node that goes out as part of
1429 # information from each changeset node that goes out as part of
1422 # the changegroup. The information gathered is a list of which
1430 # the changegroup. The information gathered is a list of which
1423 # manifest nodes are potentially required (the recipient may
1431 # manifest nodes are potentially required (the recipient may
1424 # already have them) and total list of all files which were
1432 # already have them) and total list of all files which were
1425 # changed in any changeset in the changegroup.
1433 # changed in any changeset in the changegroup.
1426 #
1434 #
1427 # We also remember the first changenode we saw any manifest
1435 # We also remember the first changenode we saw any manifest
1428 # referenced by so we can later determine which changenode 'owns'
1436 # referenced by so we can later determine which changenode 'owns'
1429 # the manifest.
1437 # the manifest.
1430 def collect_manifests_and_files(clnode):
1438 def collect_manifests_and_files(clnode):
1431 c = cl.read(clnode)
1439 c = cl.read(clnode)
1432 for f in c[3]:
1440 for f in c[3]:
1433 # This is to make sure we only have one instance of each
1441 # This is to make sure we only have one instance of each
1434 # filename string for each filename.
1442 # filename string for each filename.
1435 changedfileset.setdefault(f, f)
1443 changedfileset.setdefault(f, f)
1436 msng_mnfst_set.setdefault(c[0], clnode)
1444 msng_mnfst_set.setdefault(c[0], clnode)
1437 return collect_manifests_and_files
1445 return collect_manifests_and_files
1438
1446
1439 # Figure out which manifest nodes (of the ones we think might be part
1447 # Figure out which manifest nodes (of the ones we think might be part
1440 # of the changegroup) the recipient must know about and remove them
1448 # of the changegroup) the recipient must know about and remove them
1441 # from the changegroup.
1449 # from the changegroup.
1442 def prune_manifests():
1450 def prune_manifests():
1443 has_mnfst_set = {}
1451 has_mnfst_set = {}
1444 for n in msng_mnfst_set:
1452 for n in msng_mnfst_set:
1445 # If a 'missing' manifest thinks it belongs to a changenode
1453 # If a 'missing' manifest thinks it belongs to a changenode
1446 # the recipient is assumed to have, obviously the recipient
1454 # the recipient is assumed to have, obviously the recipient
1447 # must have that manifest.
1455 # must have that manifest.
1448 linknode = cl.node(mnfst.linkrev(n))
1456 linknode = cl.node(mnfst.linkrev(n))
1449 if linknode in has_cl_set:
1457 if linknode in has_cl_set:
1450 has_mnfst_set[n] = 1
1458 has_mnfst_set[n] = 1
1451 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1459 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1452
1460
1453 # Use the information collected in collect_manifests_and_files to say
1461 # Use the information collected in collect_manifests_and_files to say
1454 # which changenode any manifestnode belongs to.
1462 # which changenode any manifestnode belongs to.
1455 def lookup_manifest_link(mnfstnode):
1463 def lookup_manifest_link(mnfstnode):
1456 return msng_mnfst_set[mnfstnode]
1464 return msng_mnfst_set[mnfstnode]
1457
1465
1458 # A function generating function that sets up the initial environment
1466 # A function generating function that sets up the initial environment
1459 # the inner function.
1467 # the inner function.
1460 def filenode_collector(changedfiles):
1468 def filenode_collector(changedfiles):
1461 next_rev = [0]
1469 next_rev = [0]
1462 # This gathers information from each manifestnode included in the
1470 # This gathers information from each manifestnode included in the
1463 # changegroup about which filenodes the manifest node references
1471 # changegroup about which filenodes the manifest node references
1464 # so we can include those in the changegroup too.
1472 # so we can include those in the changegroup too.
1465 #
1473 #
1466 # It also remembers which changenode each filenode belongs to. It
1474 # It also remembers which changenode each filenode belongs to. It
1467 # does this by assuming the a filenode belongs to the changenode
1475 # does this by assuming the a filenode belongs to the changenode
1468 # the first manifest that references it belongs to.
1476 # the first manifest that references it belongs to.
1469 def collect_msng_filenodes(mnfstnode):
1477 def collect_msng_filenodes(mnfstnode):
1470 r = mnfst.rev(mnfstnode)
1478 r = mnfst.rev(mnfstnode)
1471 if r == next_rev[0]:
1479 if r == next_rev[0]:
1472 # If the last rev we looked at was the one just previous,
1480 # If the last rev we looked at was the one just previous,
1473 # we only need to see a diff.
1481 # we only need to see a diff.
1474 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1482 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1475 # For each line in the delta
1483 # For each line in the delta
1476 for dline in delta.splitlines():
1484 for dline in delta.splitlines():
1477 # get the filename and filenode for that line
1485 # get the filename and filenode for that line
1478 f, fnode = dline.split('\0')
1486 f, fnode = dline.split('\0')
1479 fnode = bin(fnode[:40])
1487 fnode = bin(fnode[:40])
1480 f = changedfiles.get(f, None)
1488 f = changedfiles.get(f, None)
1481 # And if the file is in the list of files we care
1489 # And if the file is in the list of files we care
1482 # about.
1490 # about.
1483 if f is not None:
1491 if f is not None:
1484 # Get the changenode this manifest belongs to
1492 # Get the changenode this manifest belongs to
1485 clnode = msng_mnfst_set[mnfstnode]
1493 clnode = msng_mnfst_set[mnfstnode]
1486 # Create the set of filenodes for the file if
1494 # Create the set of filenodes for the file if
1487 # there isn't one already.
1495 # there isn't one already.
1488 ndset = msng_filenode_set.setdefault(f, {})
1496 ndset = msng_filenode_set.setdefault(f, {})
1489 # And set the filenode's changelog node to the
1497 # And set the filenode's changelog node to the
1490 # manifest's if it hasn't been set already.
1498 # manifest's if it hasn't been set already.
1491 ndset.setdefault(fnode, clnode)
1499 ndset.setdefault(fnode, clnode)
1492 else:
1500 else:
1493 # Otherwise we need a full manifest.
1501 # Otherwise we need a full manifest.
1494 m = mnfst.read(mnfstnode)
1502 m = mnfst.read(mnfstnode)
1495 # For every file in we care about.
1503 # For every file in we care about.
1496 for f in changedfiles:
1504 for f in changedfiles:
1497 fnode = m.get(f, None)
1505 fnode = m.get(f, None)
1498 # If it's in the manifest
1506 # If it's in the manifest
1499 if fnode is not None:
1507 if fnode is not None:
1500 # See comments above.
1508 # See comments above.
1501 clnode = msng_mnfst_set[mnfstnode]
1509 clnode = msng_mnfst_set[mnfstnode]
1502 ndset = msng_filenode_set.setdefault(f, {})
1510 ndset = msng_filenode_set.setdefault(f, {})
1503 ndset.setdefault(fnode, clnode)
1511 ndset.setdefault(fnode, clnode)
1504 # Remember the revision we hope to see next.
1512 # Remember the revision we hope to see next.
1505 next_rev[0] = r + 1
1513 next_rev[0] = r + 1
1506 return collect_msng_filenodes
1514 return collect_msng_filenodes
1507
1515
1508 # We have a list of filenodes we think we need for a file, lets remove
1516 # We have a list of filenodes we think we need for a file, lets remove
1509 # all those we now the recipient must have.
1517 # all those we now the recipient must have.
1510 def prune_filenodes(f, filerevlog):
1518 def prune_filenodes(f, filerevlog):
1511 msngset = msng_filenode_set[f]
1519 msngset = msng_filenode_set[f]
1512 hasset = {}
1520 hasset = {}
1513 # If a 'missing' filenode thinks it belongs to a changenode we
1521 # If a 'missing' filenode thinks it belongs to a changenode we
1514 # assume the recipient must have, then the recipient must have
1522 # assume the recipient must have, then the recipient must have
1515 # that filenode.
1523 # that filenode.
1516 for n in msngset:
1524 for n in msngset:
1517 clnode = cl.node(filerevlog.linkrev(n))
1525 clnode = cl.node(filerevlog.linkrev(n))
1518 if clnode in has_cl_set:
1526 if clnode in has_cl_set:
1519 hasset[n] = 1
1527 hasset[n] = 1
1520 prune_parents(filerevlog, hasset, msngset)
1528 prune_parents(filerevlog, hasset, msngset)
1521
1529
1522 # A function generator function that sets up the a context for the
1530 # A function generator function that sets up the a context for the
1523 # inner function.
1531 # inner function.
1524 def lookup_filenode_link_func(fname):
1532 def lookup_filenode_link_func(fname):
1525 msngset = msng_filenode_set[fname]
1533 msngset = msng_filenode_set[fname]
1526 # Lookup the changenode the filenode belongs to.
1534 # Lookup the changenode the filenode belongs to.
1527 def lookup_filenode_link(fnode):
1535 def lookup_filenode_link(fnode):
1528 return msngset[fnode]
1536 return msngset[fnode]
1529 return lookup_filenode_link
1537 return lookup_filenode_link
1530
1538
1531 # Now that we have all theses utility functions to help out and
1539 # Now that we have all theses utility functions to help out and
1532 # logically divide up the task, generate the group.
1540 # logically divide up the task, generate the group.
1533 def gengroup():
1541 def gengroup():
1534 # The set of changed files starts empty.
1542 # The set of changed files starts empty.
1535 changedfiles = {}
1543 changedfiles = {}
1536 # Create a changenode group generator that will call our functions
1544 # Create a changenode group generator that will call our functions
1537 # back to lookup the owning changenode and collect information.
1545 # back to lookup the owning changenode and collect information.
1538 group = cl.group(msng_cl_lst, identity,
1546 group = cl.group(msng_cl_lst, identity,
1539 manifest_and_file_collector(changedfiles))
1547 manifest_and_file_collector(changedfiles))
1540 for chnk in group:
1548 for chnk in group:
1541 yield chnk
1549 yield chnk
1542
1550
1543 # The list of manifests has been collected by the generator
1551 # The list of manifests has been collected by the generator
1544 # calling our functions back.
1552 # calling our functions back.
1545 prune_manifests()
1553 prune_manifests()
1546 msng_mnfst_lst = msng_mnfst_set.keys()
1554 msng_mnfst_lst = msng_mnfst_set.keys()
1547 # Sort the manifestnodes by revision number.
1555 # Sort the manifestnodes by revision number.
1548 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1556 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1549 # Create a generator for the manifestnodes that calls our lookup
1557 # Create a generator for the manifestnodes that calls our lookup
1550 # and data collection functions back.
1558 # and data collection functions back.
1551 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1559 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1552 filenode_collector(changedfiles))
1560 filenode_collector(changedfiles))
1553 for chnk in group:
1561 for chnk in group:
1554 yield chnk
1562 yield chnk
1555
1563
1556 # These are no longer needed, dereference and toss the memory for
1564 # These are no longer needed, dereference and toss the memory for
1557 # them.
1565 # them.
1558 msng_mnfst_lst = None
1566 msng_mnfst_lst = None
1559 msng_mnfst_set.clear()
1567 msng_mnfst_set.clear()
1560
1568
1561 changedfiles = changedfiles.keys()
1569 changedfiles = changedfiles.keys()
1562 changedfiles.sort()
1570 changedfiles.sort()
1563 # Go through all our files in order sorted by name.
1571 # Go through all our files in order sorted by name.
1564 for fname in changedfiles:
1572 for fname in changedfiles:
1565 filerevlog = self.file(fname)
1573 filerevlog = self.file(fname)
1566 # Toss out the filenodes that the recipient isn't really
1574 # Toss out the filenodes that the recipient isn't really
1567 # missing.
1575 # missing.
1568 if msng_filenode_set.has_key(fname):
1576 if msng_filenode_set.has_key(fname):
1569 prune_filenodes(fname, filerevlog)
1577 prune_filenodes(fname, filerevlog)
1570 msng_filenode_lst = msng_filenode_set[fname].keys()
1578 msng_filenode_lst = msng_filenode_set[fname].keys()
1571 else:
1579 else:
1572 msng_filenode_lst = []
1580 msng_filenode_lst = []
1573 # If any filenodes are left, generate the group for them,
1581 # If any filenodes are left, generate the group for them,
1574 # otherwise don't bother.
1582 # otherwise don't bother.
1575 if len(msng_filenode_lst) > 0:
1583 if len(msng_filenode_lst) > 0:
1576 yield changegroup.genchunk(fname)
1584 yield changegroup.genchunk(fname)
1577 # Sort the filenodes by their revision #
1585 # Sort the filenodes by their revision #
1578 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1586 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1579 # Create a group generator and only pass in a changenode
1587 # Create a group generator and only pass in a changenode
1580 # lookup function as we need to collect no information
1588 # lookup function as we need to collect no information
1581 # from filenodes.
1589 # from filenodes.
1582 group = filerevlog.group(msng_filenode_lst,
1590 group = filerevlog.group(msng_filenode_lst,
1583 lookup_filenode_link_func(fname))
1591 lookup_filenode_link_func(fname))
1584 for chnk in group:
1592 for chnk in group:
1585 yield chnk
1593 yield chnk
1586 if msng_filenode_set.has_key(fname):
1594 if msng_filenode_set.has_key(fname):
1587 # Don't need this anymore, toss it to free memory.
1595 # Don't need this anymore, toss it to free memory.
1588 del msng_filenode_set[fname]
1596 del msng_filenode_set[fname]
1589 # Signal that no more groups are left.
1597 # Signal that no more groups are left.
1590 yield changegroup.closechunk()
1598 yield changegroup.closechunk()
1591
1599
1592 if msng_cl_lst:
1600 if msng_cl_lst:
1593 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1601 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1594
1602
1595 return util.chunkbuffer(gengroup())
1603 return util.chunkbuffer(gengroup())
1596
1604
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        # Give hooks a chance to veto the outgoing transfer before any work.
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # Everything descending from basenodes is being sent.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Set of outgoing changelog revision numbers, used below to decide
        # which manifest/file revisions belong in the group.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # Changelog nodes act as their own "link" nodes.
            return x

        def gennodelst(revlog):
            # Yield the nodes of `revlog` whose linked changeset is outgoing.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Return a callback that records in `changedfileset` every file
            # name touched by each changeset as it is streamed out.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Return a callback mapping a node of `revlog` to the changelog
            # node that introduced it (via the revlog's linkrev).
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # Changelog group first; collecting file names as a side effect.
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # Then the manifest group.
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # Finally one sub-group per changed file, in sorted name order;
            # files with no outgoing revisions are skipped entirely.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        # chunkbuffer lets callers read the stream in arbitrary-sized pieces.
        return util.chunkbuffer(gengroup())
1662
1670
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        # NOTE: csmap/revmap close over `cl`, which is only bound later
        # (inside the try block below), before the callbacks are invoked.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog rev numbers before/after the group is added.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            # The file section is a sequence of (name chunk, revision group)
            # pairs, terminated by an empty chunk.
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # Flush the append-file changelog to its real files.
            cl.writedata()
        finally:
            # Always discard the temporary append-file state, even on error.
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may throw to abort before the transaction
            # is committed; cor+1 is the first newly added changeset.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # One "incoming" hook invocation per new changeset.
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1758
1766
1759
1767
1760 def stream_in(self, remote):
1768 def stream_in(self, remote):
1761 fp = remote.stream_out()
1769 fp = remote.stream_out()
1762 resp = int(fp.readline())
1770 resp = int(fp.readline())
1763 if resp != 0:
1771 if resp != 0:
1764 raise util.Abort(_('operation forbidden by server'))
1772 raise util.Abort(_('operation forbidden by server'))
1765 self.ui.status(_('streaming all changes\n'))
1773 self.ui.status(_('streaming all changes\n'))
1766 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1774 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1767 self.ui.status(_('%d files to transfer, %s of data\n') %
1775 self.ui.status(_('%d files to transfer, %s of data\n') %
1768 (total_files, util.bytecount(total_bytes)))
1776 (total_files, util.bytecount(total_bytes)))
1769 start = time.time()
1777 start = time.time()
1770 for i in xrange(total_files):
1778 for i in xrange(total_files):
1771 name, size = fp.readline().split('\0', 1)
1779 name, size = fp.readline().split('\0', 1)
1772 size = int(size)
1780 size = int(size)
1773 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1781 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1774 ofp = self.opener(name, 'w')
1782 ofp = self.opener(name, 'w')
1775 for chunk in util.filechunkiter(fp, limit=size):
1783 for chunk in util.filechunkiter(fp, limit=size):
1776 ofp.write(chunk)
1784 ofp.write(chunk)
1777 ofp.close()
1785 ofp.close()
1778 elapsed = time.time() - start
1786 elapsed = time.time() - start
1779 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1787 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1780 (util.bytecount(total_bytes), elapsed,
1788 (util.bytecount(total_bytes), elapsed,
1781 util.bytecount(total_bytes / elapsed)))
1789 util.bytecount(total_bytes / elapsed)))
1782 self.reload()
1790 self.reload()
1783 return len(self.heads()) + 1
1791 return len(self.heads()) + 1
1784
1792
1785 def clone(self, remote, heads=[], stream=False):
1793 def clone(self, remote, heads=[], stream=False):
1786 '''clone remote repository.
1794 '''clone remote repository.
1787
1795
1788 keyword arguments:
1796 keyword arguments:
1789 heads: list of revs to clone (forces use of pull)
1797 heads: list of revs to clone (forces use of pull)
1790 stream: use streaming clone if possible'''
1798 stream: use streaming clone if possible'''
1791
1799
1792 # now, all clients that can request uncompressed clones can
1800 # now, all clients that can request uncompressed clones can
1793 # read repo formats supported by all servers that can serve
1801 # read repo formats supported by all servers that can serve
1794 # them.
1802 # them.
1795
1803
1796 # if revlog format changes, client will have to check version
1804 # if revlog format changes, client will have to check version
1797 # and format flags on "stream" capability, and use
1805 # and format flags on "stream" capability, and use
1798 # uncompressed only if compatible.
1806 # uncompressed only if compatible.
1799
1807
1800 if stream and not heads and remote.capable('stream'):
1808 if stream and not heads and remote.capable('stream'):
1801 return self.stream_in(remote)
1809 return self.stream_in(remote)
1802 return self.pull(remote, heads)
1810 return self.pull(remote, heads)
1803
1811
def aftertrans(base):
    """Return a callback that renames the transaction journal files under
    *base* to their "undo" names.

    A plain closure (rather than a bound method) is returned on purpose:
    it avoids circular references so destructors keep working.
    """
    directory = base

    def rename_journal_files():
        # journal -> undo, journal.dirstate -> undo.dirstate
        util.rename(os.path.join(directory, "journal"),
                    os.path.join(directory, "undo"))
        util.rename(os.path.join(directory, "journal.dirstate"),
                    os.path.join(directory, "undo.dirstate"))

    return rename_journal_files
1812
1820
def instance(ui, path, create):
    """Open (or, if *create* is true, create) the local repository at
    *path*, after stripping any leading 'file:' scheme from the path."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1815
1823
def islocal(path):
    """Report whether *path* names a local repository.

    This module only ever handles local repositories, so the answer is
    unconditionally True regardless of *path*.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now