##// END OF EJS Templates
Add branch support to commit
Matt Mackall -
r3419:d0459ec1 default
parent child Browse files
Show More
@@ -1,1803 +1,1814
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
    # wire-protocol capabilities advertised by this repository class
    # (none for a plain local repository)
    capabilities = ()
19
19
    def __del__(self):
        # drop the transaction handle on teardown so it cannot outlive
        # the repository object
        self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
40 os.mkdir(self.join("data"))
41 else:
41 else:
42 raise repo.RepoError(_("repository %s not found") % path)
42 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
43 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
44 raise repo.RepoError(_("repository %s already exists") % path)
45
45
46 self.root = os.path.abspath(path)
46 self.root = os.path.abspath(path)
47 self.origroot = path
47 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
48 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
49 self.opener = util.opener(self.path)
50 self.wopener = util.opener(self.root)
50 self.wopener = util.opener(self.root)
51
51
52 try:
52 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
53 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
54 except IOError:
55 pass
55 pass
56
56
57 v = self.ui.configrevlog()
57 v = self.ui.configrevlog()
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
60 fl = v.get('flags', None)
61 flags = 0
61 flags = 0
62 if fl != None:
62 if fl != None:
63 for x in fl.split():
63 for x in fl.split():
64 flags |= revlog.flagstr(x)
64 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
65 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
66 flags = revlog.REVLOG_DEFAULT_FLAGS
67
67
68 v = self.revlogversion | flags
68 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.opener, v)
69 self.manifest = manifest.manifest(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
71
71
72 # the changelog might not have the inline index flag
72 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
73 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
74 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just version from the changelog
75 # Otherwise, just version from the changelog
76 v = self.changelog.version
76 v = self.changelog.version
77 if v == self.revlogversion:
77 if v == self.revlogversion:
78 v |= flags
78 v |= flags
79 self.revlogversion = v
79 self.revlogversion = v
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.branchcache = None
82 self.branchcache = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.encodepats = None
84 self.encodepats = None
85 self.decodepats = None
85 self.decodepats = None
86 self.transhandle = None
86 self.transhandle = None
87
87
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
    def url(self):
        """return the URL for this repository (always a local file: URL)"""
        return 'file:' + self.root
92
92
    def hook(self, name, throw=False, **args):
        """run all configured hooks whose key matches name.

        Hooks come from the [hooks] config section; entries whose value
        starts with 'python:' are called in-process, anything else runs
        as a shell command.  Returns true if any hook failed; with
        throw=True a failure raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            # funcname must be "module.attr[.attr...]"
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            # walk the dotted path down to the callable
            try:
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                # an exception counts as a failed hook
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run an external hook command with HG_* variables describing
            # the event placed in its environment
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # select [hooks] entries whose name (before any ".suffix") matches
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
173
173
    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'
175
175
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags live in the non-versioned 'localtags' file
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to touch .hgtags if it appears in any of the first five
        # status lists (NOTE(review): presumably modified/added/removed/
        # deleted/unknown — confirm against status())
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            # .hgtags not yet tracked: schedule it for addition
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
216
216
217 def tags(self):
217 def tags(self):
218 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
219 if not self.tagscache:
219 if not self.tagscache:
220 self.tagscache = {}
220 self.tagscache = {}
221
221
222 def parsetag(line, context):
222 def parsetag(line, context):
223 if not line:
223 if not line:
224 return
224 return
225 s = l.split(" ", 1)
225 s = l.split(" ", 1)
226 if len(s) != 2:
226 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
228 return
229 node, key = s
229 node, key = s
230 key = key.strip()
230 key = key.strip()
231 try:
231 try:
232 bin_n = bin(node)
232 bin_n = bin(node)
233 except TypeError:
233 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
235 (context, node))
236 return
236 return
237 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
239 (context, key))
240 return
240 return
241 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
242
242
243 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
245 # taking precedence
245 # taking precedence
246 heads = self.heads()
246 heads = self.heads()
247 heads.reverse()
247 heads.reverse()
248 fl = self.file(".hgtags")
248 fl = self.file(".hgtags")
249 for node in heads:
249 for node in heads:
250 change = self.changelog.read(node)
250 change = self.changelog.read(node)
251 rev = self.changelog.rev(node)
251 rev = self.changelog.rev(node)
252 fn, ff = self.manifest.find(change[0], '.hgtags')
252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 if fn is None: continue
253 if fn is None: continue
254 count = 0
254 count = 0
255 for l in fl.read(fn).splitlines():
255 for l in fl.read(fn).splitlines():
256 count += 1
256 count += 1
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 (rev, short(node), count))
258 (rev, short(node), count))
259 try:
259 try:
260 f = self.opener("localtags")
260 f = self.opener("localtags")
261 count = 0
261 count = 0
262 for l in f:
262 for l in f:
263 count += 1
263 count += 1
264 parsetag(l, _("localtags, line %d") % count)
264 parsetag(l, _("localtags, line %d") % count)
265 except IOError:
265 except IOError:
266 pass
266 pass
267
267
268 self.tagscache['tip'] = self.changelog.tip()
268 self.tagscache['tip'] = self.changelog.tip()
269
269
270 return self.tagscache
270 return self.tagscache
271
271
272 def tagslist(self):
272 def tagslist(self):
273 '''return a list of tags ordered by revision'''
273 '''return a list of tags ordered by revision'''
274 l = []
274 l = []
275 for t, n in self.tags().items():
275 for t, n in self.tags().items():
276 try:
276 try:
277 r = self.changelog.rev(n)
277 r = self.changelog.rev(n)
278 except:
278 except:
279 r = -2 # sort to the beginning of the list if unknown
279 r = -2 # sort to the beginning of the list if unknown
280 l.append((r, t, n))
280 l.append((r, t, n))
281 l.sort()
281 l.sort()
282 return [(t, n) for r, t, n in l]
282 return [(t, n) for r, t, n in l]
283
283
284 def nodetags(self, node):
284 def nodetags(self, node):
285 '''return the tags associated with a node'''
285 '''return the tags associated with a node'''
286 if not self.nodetagscache:
286 if not self.nodetagscache:
287 self.nodetagscache = {}
287 self.nodetagscache = {}
288 for t, n in self.tags().items():
288 for t, n in self.tags().items():
289 self.nodetagscache.setdefault(n, []).append(t)
289 self.nodetagscache.setdefault(n, []).append(t)
290 return self.nodetagscache.get(node, [])
290 return self.nodetagscache.get(node, [])
291
291
    def branchtags(self):
        """return a mapping of branch name -> most recent node on that
        branch, using the in-memory cache and the on-disk
        'branches.cache' file, and scanning only changesets newer than
        the on-disk cache."""
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {}

        try:
            f = self.opener("branches.cache")
            # first line of the cache: "<hex tip node> <tip rev>"
            last, lrev = f.readline().rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if self.changelog.node(lrev) == last: # sanity check
                # remaining lines: "<hex node> <branch label>"
                for l in f:
                    node, label = l.rstrip().split(" ", 1)
                    self.branchcache[label] = bin(node)
            f.close()
        except IOError:
            # no cache file: scan the whole changelog
            last, lrev = nullid, -1
            # NOTE(review): rev(nullid) is presumably -1, which would
            # make this line redundant with the assignment above —
            # confirm against changelog.rev
            lrev = self.changelog.rev(last)

        # scan any changesets the cache does not cover and persist the
        # updated cache
        tip = self.changelog.count() - 1
        if lrev != tip:
            for r in range(lrev + 1, tip + 1):
                n = self.changelog.node(r)
                c = self.changelog.read(n)
                # c[5] is the extra dict; branch membership comes from
                # its "branch" entry
                b = c[5].get("branch")
                if b:
                    self.branchcache[b] = n
            self._writebranchcache()

        return self.branchcache
322
322
323 def _writebranchcache(self):
323 def _writebranchcache(self):
324 f = self.opener("branches.cache", "w")
324 f = self.opener("branches.cache", "w")
325 t = self.changelog.tip()
325 t = self.changelog.tip()
326 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
326 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
327 for label, node in self.branchcache.iteritems():
327 for label, node in self.branchcache.iteritems():
328 f.write("%s %s\n" % (hex(node), label))
328 f.write("%s %s\n" % (hex(node), label))
329
329
330 def lookup(self, key):
330 def lookup(self, key):
331 if key == '.':
331 if key == '.':
332 key = self.dirstate.parents()[0]
332 key = self.dirstate.parents()[0]
333 if key == nullid:
333 if key == nullid:
334 raise repo.RepoError(_("no revision checked out"))
334 raise repo.RepoError(_("no revision checked out"))
335 if key in self.tags():
335 if key in self.tags():
336 return self.tags()[key]
336 return self.tags()[key]
337 if key in self.branchtags():
337 if key in self.branchtags():
338 return self.branchtags()[key]
338 return self.branchtags()[key]
339 try:
339 try:
340 return self.changelog.lookup(key)
340 return self.changelog.lookup(key)
341 except:
341 except:
342 raise repo.RepoError(_("unknown revision '%s'") % key)
342 raise repo.RepoError(_("unknown revision '%s'") % key)
343
343
    def dev(self):
        """return the device number (st_dev) of the .hg directory"""
        return os.lstat(self.path).st_dev
346
346
    def local(self):
        """return True: this is a local (direct filesystem) repository"""
        return True
349
349
    def join(self, f):
        """return f joined onto the repository's .hg directory"""
        return os.path.join(self.path, f)
352
352
    def wjoin(self, f):
        """return f joined onto the working directory root"""
        return os.path.join(self.root, f)
355
355
356 def file(self, f):
356 def file(self, f):
357 if f[0] == '/':
357 if f[0] == '/':
358 f = f[1:]
358 f = f[1:]
359 return filelog.filelog(self.opener, f, self.revlogversion)
359 return filelog.filelog(self.opener, f, self.revlogversion)
360
360
    def changectx(self, changeid=None):
        """return a changectx for the given changeset id"""
        return context.changectx(self, changeid)
363
363
    def workingctx(self):
        """return a context object for the working directory"""
        return context.workingctx(self)
366
366
367 def parents(self, changeid=None):
367 def parents(self, changeid=None):
368 '''
368 '''
369 get list of changectxs for parents of changeid or working directory
369 get list of changectxs for parents of changeid or working directory
370 '''
370 '''
371 if changeid is None:
371 if changeid is None:
372 pl = self.dirstate.parents()
372 pl = self.dirstate.parents()
373 else:
373 else:
374 n = self.changelog.lookup(changeid)
374 n = self.changelog.lookup(changeid)
375 pl = self.changelog.parents(n)
375 pl = self.changelog.parents(n)
376 if pl[1] == nullid:
376 if pl[1] == nullid:
377 return [self.changectx(pl[0])]
377 return [self.changectx(pl[0])]
378 return [self.changectx(pl[0]), self.changectx(pl[1])]
378 return [self.changectx(pl[0]), self.changectx(pl[1])]
379
379
    def filectx(self, path, changeid=None, fileid=None):
        """return a filectx for path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
384
384
    def getcwd(self):
        """return the current directory as reported by the dirstate
        (presumably relative to the repository root — confirm in
        dirstate.getcwd)"""
        return self.dirstate.getcwd()
387
387
    def wfile(self, f, mode='r'):
        """open file f from the working directory with the given mode"""
        return self.wopener(f, mode)
390
390
391 def wread(self, filename):
391 def wread(self, filename):
392 if self.encodepats == None:
392 if self.encodepats == None:
393 l = []
393 l = []
394 for pat, cmd in self.ui.configitems("encode"):
394 for pat, cmd in self.ui.configitems("encode"):
395 mf = util.matcher(self.root, "", [pat], [], [])[1]
395 mf = util.matcher(self.root, "", [pat], [], [])[1]
396 l.append((mf, cmd))
396 l.append((mf, cmd))
397 self.encodepats = l
397 self.encodepats = l
398
398
399 data = self.wopener(filename, 'r').read()
399 data = self.wopener(filename, 'r').read()
400
400
401 for mf, cmd in self.encodepats:
401 for mf, cmd in self.encodepats:
402 if mf(filename):
402 if mf(filename):
403 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
403 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
404 data = util.filter(data, cmd)
404 data = util.filter(data, cmd)
405 break
405 break
406
406
407 return data
407 return data
408
408
409 def wwrite(self, filename, data, fd=None):
409 def wwrite(self, filename, data, fd=None):
410 if self.decodepats == None:
410 if self.decodepats == None:
411 l = []
411 l = []
412 for pat, cmd in self.ui.configitems("decode"):
412 for pat, cmd in self.ui.configitems("decode"):
413 mf = util.matcher(self.root, "", [pat], [], [])[1]
413 mf = util.matcher(self.root, "", [pat], [], [])[1]
414 l.append((mf, cmd))
414 l.append((mf, cmd))
415 self.decodepats = l
415 self.decodepats = l
416
416
417 for mf, cmd in self.decodepats:
417 for mf, cmd in self.decodepats:
418 if mf(filename):
418 if mf(filename):
419 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
419 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
420 data = util.filter(data, cmd)
420 data = util.filter(data, cmd)
421 break
421 break
422
422
423 if fd:
423 if fd:
424 return fd.write(data)
424 return fd.write(data)
425 return self.wopener(filename, 'w').write(data)
425 return self.wopener(filename, 'w').write(data)
426
426
427 def transaction(self):
427 def transaction(self):
428 tr = self.transhandle
428 tr = self.transhandle
429 if tr != None and tr.running():
429 if tr != None and tr.running():
430 return tr.nest()
430 return tr.nest()
431
431
432 # save dirstate for rollback
432 # save dirstate for rollback
433 try:
433 try:
434 ds = self.opener("dirstate").read()
434 ds = self.opener("dirstate").read()
435 except IOError:
435 except IOError:
436 ds = ""
436 ds = ""
437 self.opener("journal.dirstate", "w").write(ds)
437 self.opener("journal.dirstate", "w").write(ds)
438
438
439 tr = transaction.transaction(self.ui.warn, self.opener,
439 tr = transaction.transaction(self.ui.warn, self.opener,
440 self.join("journal"),
440 self.join("journal"),
441 aftertrans(self.path))
441 aftertrans(self.path))
442 self.transhandle = tr
442 self.transhandle = tr
443 return tr
443 return tr
444
444
445 def recover(self):
445 def recover(self):
446 l = self.lock()
446 l = self.lock()
447 if os.path.exists(self.join("journal")):
447 if os.path.exists(self.join("journal")):
448 self.ui.status(_("rolling back interrupted transaction\n"))
448 self.ui.status(_("rolling back interrupted transaction\n"))
449 transaction.rollback(self.opener, self.join("journal"))
449 transaction.rollback(self.opener, self.join("journal"))
450 self.reload()
450 self.reload()
451 return True
451 return True
452 else:
452 else:
453 self.ui.warn(_("no interrupted transaction available\n"))
453 self.ui.warn(_("no interrupted transaction available\n"))
454 return False
454 return False
455
455
456 def rollback(self, wlock=None):
456 def rollback(self, wlock=None):
457 if not wlock:
457 if not wlock:
458 wlock = self.wlock()
458 wlock = self.wlock()
459 l = self.lock()
459 l = self.lock()
460 if os.path.exists(self.join("undo")):
460 if os.path.exists(self.join("undo")):
461 self.ui.status(_("rolling back last transaction\n"))
461 self.ui.status(_("rolling back last transaction\n"))
462 transaction.rollback(self.opener, self.join("undo"))
462 transaction.rollback(self.opener, self.join("undo"))
463 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
463 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
464 self.reload()
464 self.reload()
465 self.wreload()
465 self.wreload()
466 else:
466 else:
467 self.ui.warn(_("no rollback information available\n"))
467 self.ui.warn(_("no rollback information available\n"))
468
468
    def wreload(self):
        """re-read the dirstate (used after wlock acquisition and
        rollback)"""
        self.dirstate.read()
471
471
    def reload(self):
        """re-read the changelog and manifest and drop the tag caches
        after the store has changed underneath us (e.g. rollback)"""
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
        # NOTE(review): self.branchcache is not invalidated here, so an
        # in-memory branch cache may go stale after a rollback — confirm
        # whether that is intentional
477
477
478 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
478 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
479 desc=None):
479 desc=None):
480 try:
480 try:
481 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
481 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
482 except lock.LockHeld, inst:
482 except lock.LockHeld, inst:
483 if not wait:
483 if not wait:
484 raise
484 raise
485 self.ui.warn(_("waiting for lock on %s held by %s\n") %
485 self.ui.warn(_("waiting for lock on %s held by %s\n") %
486 (desc, inst.args[0]))
486 (desc, inst.args[0]))
487 # default to 600 seconds timeout
487 # default to 600 seconds timeout
488 l = lock.lock(self.join(lockname),
488 l = lock.lock(self.join(lockname),
489 int(self.ui.config("ui", "timeout") or 600),
489 int(self.ui.config("ui", "timeout") or 600),
490 releasefn, desc=desc)
490 releasefn, desc=desc)
491 if acquirefn:
491 if acquirefn:
492 acquirefn()
492 acquirefn()
493 return l
493 return l
494
494
    def lock(self, wait=1):
        """acquire the repository (store) lock; reloads cached state
        on acquisition"""
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
498
498
    def wlock(self, wait=1):
        """acquire the working directory lock; the dirstate is written
        on release and re-read on acquisition"""
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
503
503
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        returns the new filelog node for fn (or the existing parent
        node if the file is unchanged).  fn is appended to changelist
        only when a new revision is actually written.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # filelog parents, from the two manifests being committed against
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file is a copy/rename; record the source and the
            # revision of the source in the filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # copies get no first parent; ancestry flows through the
            # copy metadata instead
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
543
543
544 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
544 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
545 orig_parent = self.dirstate.parents()[0] or nullid
545 orig_parent = self.dirstate.parents()[0] or nullid
546 p1 = p1 or self.dirstate.parents()[0] or nullid
546 p1 = p1 or self.dirstate.parents()[0] or nullid
547 p2 = p2 or self.dirstate.parents()[1] or nullid
547 p2 = p2 or self.dirstate.parents()[1] or nullid
548 c1 = self.changelog.read(p1)
548 c1 = self.changelog.read(p1)
549 c2 = self.changelog.read(p2)
549 c2 = self.changelog.read(p2)
550 m1 = self.manifest.read(c1[0]).copy()
550 m1 = self.manifest.read(c1[0]).copy()
551 m2 = self.manifest.read(c2[0])
551 m2 = self.manifest.read(c2[0])
552 changed = []
552 changed = []
553 removed = []
553 removed = []
554
554
555 if orig_parent == p1:
555 if orig_parent == p1:
556 update_dirstate = 1
556 update_dirstate = 1
557 else:
557 else:
558 update_dirstate = 0
558 update_dirstate = 0
559
559
560 if not wlock:
560 if not wlock:
561 wlock = self.wlock()
561 wlock = self.wlock()
562 l = self.lock()
562 l = self.lock()
563 tr = self.transaction()
563 tr = self.transaction()
564 linkrev = self.changelog.count()
564 linkrev = self.changelog.count()
565 for f in files:
565 for f in files:
566 try:
566 try:
567 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
567 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
569 except IOError:
569 except IOError:
570 try:
570 try:
571 del m1[f]
571 del m1[f]
572 if update_dirstate:
572 if update_dirstate:
573 self.dirstate.forget([f])
573 self.dirstate.forget([f])
574 removed.append(f)
574 removed.append(f)
575 except:
575 except:
576 # deleted from p2?
576 # deleted from p2?
577 pass
577 pass
578
578
579 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
579 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
580 user = user or self.ui.username()
580 user = user or self.ui.username()
581 n = self.changelog.add(mnode, changed + removed, text,
581 n = self.changelog.add(mnode, changed + removed, text,
582 tr, p1, p2, user, date)
582 tr, p1, p2, user, date)
583 tr.close()
583 tr.close()
584 if update_dirstate:
584 if update_dirstate:
585 self.dirstate.setparents(n, nullid)
585 self.dirstate.setparents(n, nullid)
586
586
587 def commit(self, files=None, text="", user=None, date=None,
587 def commit(self, files=None, text="", user=None, date=None,
588 match=util.always, force=False, lock=None, wlock=None,
588 match=util.always, force=False, lock=None, wlock=None,
589 force_editor=False):
589 force_editor=False):
590 commit = []
590 commit = []
591 remove = []
591 remove = []
592 changed = []
592 changed = []
593
593
594 if files:
594 if files:
595 for f in files:
595 for f in files:
596 s = self.dirstate.state(f)
596 s = self.dirstate.state(f)
597 if s in 'nmai':
597 if s in 'nmai':
598 commit.append(f)
598 commit.append(f)
599 elif s == 'r':
599 elif s == 'r':
600 remove.append(f)
600 remove.append(f)
601 else:
601 else:
602 self.ui.warn(_("%s not tracked!\n") % f)
602 self.ui.warn(_("%s not tracked!\n") % f)
603 else:
603 else:
604 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
604 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
605 commit = modified + added
605 commit = modified + added
606 remove = removed
606 remove = removed
607
607
608 p1, p2 = self.dirstate.parents()
608 p1, p2 = self.dirstate.parents()
609 c1 = self.changelog.read(p1)
609 c1 = self.changelog.read(p1)
610 c2 = self.changelog.read(p2)
610 c2 = self.changelog.read(p2)
611 m1 = self.manifest.read(c1[0]).copy()
611 m1 = self.manifest.read(c1[0]).copy()
612 m2 = self.manifest.read(c2[0])
612 m2 = self.manifest.read(c2[0])
613
613
614 if not commit and not remove and not force and p2 == nullid:
614 try:
615 branchname = self.opener("branch").read().rstrip()
616 except IOError:
617 branchname = ""
618 oldname = c1[5].get("branch", "")
619
620 if not commit and not remove and not force and p2 == nullid and \
621 branchname == oldname:
615 self.ui.status(_("nothing changed\n"))
622 self.ui.status(_("nothing changed\n"))
616 return None
623 return None
617
624
618 xp1 = hex(p1)
625 xp1 = hex(p1)
619 if p2 == nullid: xp2 = ''
626 if p2 == nullid: xp2 = ''
620 else: xp2 = hex(p2)
627 else: xp2 = hex(p2)
621
628
622 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
629 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
623
630
624 if not wlock:
631 if not wlock:
625 wlock = self.wlock()
632 wlock = self.wlock()
626 if not lock:
633 if not lock:
627 lock = self.lock()
634 lock = self.lock()
628 tr = self.transaction()
635 tr = self.transaction()
629
636
630 # check in files
637 # check in files
631 new = {}
638 new = {}
632 linkrev = self.changelog.count()
639 linkrev = self.changelog.count()
633 commit.sort()
640 commit.sort()
634 for f in commit:
641 for f in commit:
635 self.ui.note(f + "\n")
642 self.ui.note(f + "\n")
636 try:
643 try:
637 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
644 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
638 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
645 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
639 except IOError:
646 except IOError:
640 self.ui.warn(_("trouble committing %s!\n") % f)
647 self.ui.warn(_("trouble committing %s!\n") % f)
641 raise
648 raise
642
649
643 # update manifest
650 # update manifest
644 m1.update(new)
651 m1.update(new)
645 for f in remove:
652 for f in remove:
646 if f in m1:
653 if f in m1:
647 del m1[f]
654 del m1[f]
648 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
655 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
649
656
650 # add changeset
657 # add changeset
651 new = new.keys()
658 new = new.keys()
652 new.sort()
659 new.sort()
653
660
654 user = user or self.ui.username()
661 user = user or self.ui.username()
655 if not text or force_editor:
662 if not text or force_editor:
656 edittext = []
663 edittext = []
657 if text:
664 if text:
658 edittext.append(text)
665 edittext.append(text)
659 edittext.append("")
666 edittext.append("")
660 if p2 != nullid:
667 if p2 != nullid:
661 edittext.append("HG: branch merge")
668 edittext.append("HG: branch merge")
662 edittext.extend(["HG: changed %s" % f for f in changed])
669 edittext.extend(["HG: changed %s" % f for f in changed])
663 edittext.extend(["HG: removed %s" % f for f in remove])
670 edittext.extend(["HG: removed %s" % f for f in remove])
664 if not changed and not remove:
671 if not changed and not remove:
665 edittext.append("HG: no files changed")
672 edittext.append("HG: no files changed")
666 edittext.append("")
673 edittext.append("")
667 # run editor in the repository root
674 # run editor in the repository root
668 olddir = os.getcwd()
675 olddir = os.getcwd()
669 os.chdir(self.root)
676 os.chdir(self.root)
670 text = self.ui.edit("\n".join(edittext), user)
677 text = self.ui.edit("\n".join(edittext), user)
671 os.chdir(olddir)
678 os.chdir(olddir)
672
679
673 lines = [line.rstrip() for line in text.rstrip().splitlines()]
680 lines = [line.rstrip() for line in text.rstrip().splitlines()]
674 while lines and not lines[0]:
681 while lines and not lines[0]:
675 del lines[0]
682 del lines[0]
676 if not lines:
683 if not lines:
677 return None
684 return None
678 text = '\n'.join(lines)
685 text = '\n'.join(lines)
679 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
686 extra = {}
687 if branchname:
688 extra["branch"] = branchname
689 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
690 user, date, extra)
680 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
691 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
681 parent2=xp2)
692 parent2=xp2)
682 tr.close()
693 tr.close()
683
694
684 self.dirstate.setparents(n)
695 self.dirstate.setparents(n)
685 self.dirstate.update(new, "n")
696 self.dirstate.update(new, "n")
686 self.dirstate.forget(remove)
697 self.dirstate.forget(remove)
687
698
688 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
699 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
689 return n
700 return n
690
701
691 def walk(self, node=None, files=[], match=util.always, badmatch=None):
702 def walk(self, node=None, files=[], match=util.always, badmatch=None):
692 if node:
703 if node:
693 fdict = dict.fromkeys(files)
704 fdict = dict.fromkeys(files)
694 for fn in self.manifest.read(self.changelog.read(node)[0]):
705 for fn in self.manifest.read(self.changelog.read(node)[0]):
695 for ffn in fdict:
706 for ffn in fdict:
696 # match if the file is the exact name or a directory
707 # match if the file is the exact name or a directory
697 if ffn == fn or fn.startswith("%s/" % ffn):
708 if ffn == fn or fn.startswith("%s/" % ffn):
698 del fdict[ffn]
709 del fdict[ffn]
699 break
710 break
700 if match(fn):
711 if match(fn):
701 yield 'm', fn
712 yield 'm', fn
702 for fn in fdict:
713 for fn in fdict:
703 if badmatch and badmatch(fn):
714 if badmatch and badmatch(fn):
704 if match(fn):
715 if match(fn):
705 yield 'b', fn
716 yield 'b', fn
706 else:
717 else:
707 self.ui.warn(_('%s: No such file in rev %s\n') % (
718 self.ui.warn(_('%s: No such file in rev %s\n') % (
708 util.pathto(self.getcwd(), fn), short(node)))
719 util.pathto(self.getcwd(), fn), short(node)))
709 else:
720 else:
710 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
721 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
711 yield src, fn
722 yield src, fn
712
723
713 def status(self, node1=None, node2=None, files=[], match=util.always,
724 def status(self, node1=None, node2=None, files=[], match=util.always,
714 wlock=None, list_ignored=False, list_clean=False):
725 wlock=None, list_ignored=False, list_clean=False):
715 """return status of files between two nodes or node and working directory
726 """return status of files between two nodes or node and working directory
716
727
717 If node1 is None, use the first dirstate parent instead.
728 If node1 is None, use the first dirstate parent instead.
718 If node2 is None, compare node1 with working directory.
729 If node2 is None, compare node1 with working directory.
719 """
730 """
720
731
721 def fcmp(fn, mf):
732 def fcmp(fn, mf):
722 t1 = self.wread(fn)
733 t1 = self.wread(fn)
723 return self.file(fn).cmp(mf.get(fn, nullid), t1)
734 return self.file(fn).cmp(mf.get(fn, nullid), t1)
724
735
725 def mfmatches(node):
736 def mfmatches(node):
726 change = self.changelog.read(node)
737 change = self.changelog.read(node)
727 mf = self.manifest.read(change[0]).copy()
738 mf = self.manifest.read(change[0]).copy()
728 for fn in mf.keys():
739 for fn in mf.keys():
729 if not match(fn):
740 if not match(fn):
730 del mf[fn]
741 del mf[fn]
731 return mf
742 return mf
732
743
733 modified, added, removed, deleted, unknown = [], [], [], [], []
744 modified, added, removed, deleted, unknown = [], [], [], [], []
734 ignored, clean = [], []
745 ignored, clean = [], []
735
746
736 compareworking = False
747 compareworking = False
737 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
748 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
738 compareworking = True
749 compareworking = True
739
750
740 if not compareworking:
751 if not compareworking:
741 # read the manifest from node1 before the manifest from node2,
752 # read the manifest from node1 before the manifest from node2,
742 # so that we'll hit the manifest cache if we're going through
753 # so that we'll hit the manifest cache if we're going through
743 # all the revisions in parent->child order.
754 # all the revisions in parent->child order.
744 mf1 = mfmatches(node1)
755 mf1 = mfmatches(node1)
745
756
746 # are we comparing the working directory?
757 # are we comparing the working directory?
747 if not node2:
758 if not node2:
748 if not wlock:
759 if not wlock:
749 try:
760 try:
750 wlock = self.wlock(wait=0)
761 wlock = self.wlock(wait=0)
751 except lock.LockException:
762 except lock.LockException:
752 wlock = None
763 wlock = None
753 (lookup, modified, added, removed, deleted, unknown,
764 (lookup, modified, added, removed, deleted, unknown,
754 ignored, clean) = self.dirstate.status(files, match,
765 ignored, clean) = self.dirstate.status(files, match,
755 list_ignored, list_clean)
766 list_ignored, list_clean)
756
767
757 # are we comparing working dir against its parent?
768 # are we comparing working dir against its parent?
758 if compareworking:
769 if compareworking:
759 if lookup:
770 if lookup:
760 # do a full compare of any files that might have changed
771 # do a full compare of any files that might have changed
761 mf2 = mfmatches(self.dirstate.parents()[0])
772 mf2 = mfmatches(self.dirstate.parents()[0])
762 for f in lookup:
773 for f in lookup:
763 if fcmp(f, mf2):
774 if fcmp(f, mf2):
764 modified.append(f)
775 modified.append(f)
765 else:
776 else:
766 clean.append(f)
777 clean.append(f)
767 if wlock is not None:
778 if wlock is not None:
768 self.dirstate.update([f], "n")
779 self.dirstate.update([f], "n")
769 else:
780 else:
770 # we are comparing working dir against non-parent
781 # we are comparing working dir against non-parent
771 # generate a pseudo-manifest for the working dir
782 # generate a pseudo-manifest for the working dir
772 # XXX: create it in dirstate.py ?
783 # XXX: create it in dirstate.py ?
773 mf2 = mfmatches(self.dirstate.parents()[0])
784 mf2 = mfmatches(self.dirstate.parents()[0])
774 for f in lookup + modified + added:
785 for f in lookup + modified + added:
775 mf2[f] = ""
786 mf2[f] = ""
776 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
787 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
777 for f in removed:
788 for f in removed:
778 if f in mf2:
789 if f in mf2:
779 del mf2[f]
790 del mf2[f]
780 else:
791 else:
781 # we are comparing two revisions
792 # we are comparing two revisions
782 mf2 = mfmatches(node2)
793 mf2 = mfmatches(node2)
783
794
784 if not compareworking:
795 if not compareworking:
785 # flush lists from dirstate before comparing manifests
796 # flush lists from dirstate before comparing manifests
786 modified, added, clean = [], [], []
797 modified, added, clean = [], [], []
787
798
788 # make sure to sort the files so we talk to the disk in a
799 # make sure to sort the files so we talk to the disk in a
789 # reasonable order
800 # reasonable order
790 mf2keys = mf2.keys()
801 mf2keys = mf2.keys()
791 mf2keys.sort()
802 mf2keys.sort()
792 for fn in mf2keys:
803 for fn in mf2keys:
793 if mf1.has_key(fn):
804 if mf1.has_key(fn):
794 if mf1.flags(fn) != mf2.flags(fn) or \
805 if mf1.flags(fn) != mf2.flags(fn) or \
795 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
806 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
796 modified.append(fn)
807 modified.append(fn)
797 elif list_clean:
808 elif list_clean:
798 clean.append(fn)
809 clean.append(fn)
799 del mf1[fn]
810 del mf1[fn]
800 else:
811 else:
801 added.append(fn)
812 added.append(fn)
802
813
803 removed = mf1.keys()
814 removed = mf1.keys()
804
815
805 # sort and return results:
816 # sort and return results:
806 for l in modified, added, removed, deleted, unknown, ignored, clean:
817 for l in modified, added, removed, deleted, unknown, ignored, clean:
807 l.sort()
818 l.sort()
808 return (modified, added, removed, deleted, unknown, ignored, clean)
819 return (modified, added, removed, deleted, unknown, ignored, clean)
809
820
810 def add(self, list, wlock=None):
821 def add(self, list, wlock=None):
811 if not wlock:
822 if not wlock:
812 wlock = self.wlock()
823 wlock = self.wlock()
813 for f in list:
824 for f in list:
814 p = self.wjoin(f)
825 p = self.wjoin(f)
815 if not os.path.exists(p):
826 if not os.path.exists(p):
816 self.ui.warn(_("%s does not exist!\n") % f)
827 self.ui.warn(_("%s does not exist!\n") % f)
817 elif not os.path.isfile(p):
828 elif not os.path.isfile(p):
818 self.ui.warn(_("%s not added: only files supported currently\n")
829 self.ui.warn(_("%s not added: only files supported currently\n")
819 % f)
830 % f)
820 elif self.dirstate.state(f) in 'an':
831 elif self.dirstate.state(f) in 'an':
821 self.ui.warn(_("%s already tracked!\n") % f)
832 self.ui.warn(_("%s already tracked!\n") % f)
822 else:
833 else:
823 self.dirstate.update([f], "a")
834 self.dirstate.update([f], "a")
824
835
825 def forget(self, list, wlock=None):
836 def forget(self, list, wlock=None):
826 if not wlock:
837 if not wlock:
827 wlock = self.wlock()
838 wlock = self.wlock()
828 for f in list:
839 for f in list:
829 if self.dirstate.state(f) not in 'ai':
840 if self.dirstate.state(f) not in 'ai':
830 self.ui.warn(_("%s not added!\n") % f)
841 self.ui.warn(_("%s not added!\n") % f)
831 else:
842 else:
832 self.dirstate.forget([f])
843 self.dirstate.forget([f])
833
844
834 def remove(self, list, unlink=False, wlock=None):
845 def remove(self, list, unlink=False, wlock=None):
835 if unlink:
846 if unlink:
836 for f in list:
847 for f in list:
837 try:
848 try:
838 util.unlink(self.wjoin(f))
849 util.unlink(self.wjoin(f))
839 except OSError, inst:
850 except OSError, inst:
840 if inst.errno != errno.ENOENT:
851 if inst.errno != errno.ENOENT:
841 raise
852 raise
842 if not wlock:
853 if not wlock:
843 wlock = self.wlock()
854 wlock = self.wlock()
844 for f in list:
855 for f in list:
845 p = self.wjoin(f)
856 p = self.wjoin(f)
846 if os.path.exists(p):
857 if os.path.exists(p):
847 self.ui.warn(_("%s still exists!\n") % f)
858 self.ui.warn(_("%s still exists!\n") % f)
848 elif self.dirstate.state(f) == 'a':
859 elif self.dirstate.state(f) == 'a':
849 self.dirstate.forget([f])
860 self.dirstate.forget([f])
850 elif f not in self.dirstate:
861 elif f not in self.dirstate:
851 self.ui.warn(_("%s not tracked!\n") % f)
862 self.ui.warn(_("%s not tracked!\n") % f)
852 else:
863 else:
853 self.dirstate.update([f], "r")
864 self.dirstate.update([f], "r")
854
865
855 def undelete(self, list, wlock=None):
866 def undelete(self, list, wlock=None):
856 p = self.dirstate.parents()[0]
867 p = self.dirstate.parents()[0]
857 mn = self.changelog.read(p)[0]
868 mn = self.changelog.read(p)[0]
858 m = self.manifest.read(mn)
869 m = self.manifest.read(mn)
859 if not wlock:
870 if not wlock:
860 wlock = self.wlock()
871 wlock = self.wlock()
861 for f in list:
872 for f in list:
862 if self.dirstate.state(f) not in "r":
873 if self.dirstate.state(f) not in "r":
863 self.ui.warn("%s not removed!\n" % f)
874 self.ui.warn("%s not removed!\n" % f)
864 else:
875 else:
865 t = self.file(f).read(m[f])
876 t = self.file(f).read(m[f])
866 self.wwrite(f, t)
877 self.wwrite(f, t)
867 util.set_exec(self.wjoin(f), m.execf(f))
878 util.set_exec(self.wjoin(f), m.execf(f))
868 self.dirstate.update([f], "n")
879 self.dirstate.update([f], "n")
869
880
870 def copy(self, source, dest, wlock=None):
881 def copy(self, source, dest, wlock=None):
871 p = self.wjoin(dest)
882 p = self.wjoin(dest)
872 if not os.path.exists(p):
883 if not os.path.exists(p):
873 self.ui.warn(_("%s does not exist!\n") % dest)
884 self.ui.warn(_("%s does not exist!\n") % dest)
874 elif not os.path.isfile(p):
885 elif not os.path.isfile(p):
875 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
886 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
876 else:
887 else:
877 if not wlock:
888 if not wlock:
878 wlock = self.wlock()
889 wlock = self.wlock()
879 if self.dirstate.state(dest) == '?':
890 if self.dirstate.state(dest) == '?':
880 self.dirstate.update([dest], "a")
891 self.dirstate.update([dest], "a")
881 self.dirstate.copy(source, dest)
892 self.dirstate.copy(source, dest)
882
893
883 def heads(self, start=None):
894 def heads(self, start=None):
884 heads = self.changelog.heads(start)
895 heads = self.changelog.heads(start)
885 # sort the output in rev descending order
896 # sort the output in rev descending order
886 heads = [(-self.changelog.rev(h), h) for h in heads]
897 heads = [(-self.changelog.rev(h), h) for h in heads]
887 heads.sort()
898 heads.sort()
888 return [n for (r, n) in heads]
899 return [n for (r, n) in heads]
889
900
890 # branchlookup returns a dict giving a list of branches for
901 # branchlookup returns a dict giving a list of branches for
891 # each head. A branch is defined as the tag of a node or
902 # each head. A branch is defined as the tag of a node or
892 # the branch of the node's parents. If a node has multiple
903 # the branch of the node's parents. If a node has multiple
893 # branch tags, tags are eliminated if they are visible from other
904 # branch tags, tags are eliminated if they are visible from other
894 # branch tags.
905 # branch tags.
895 #
906 #
896 # So, for this graph: a->b->c->d->e
907 # So, for this graph: a->b->c->d->e
897 # \ /
908 # \ /
898 # aa -----/
909 # aa -----/
899 # a has tag 2.6.12
910 # a has tag 2.6.12
900 # d has tag 2.6.13
911 # d has tag 2.6.13
901 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
912 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
902 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
913 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
903 # from the list.
914 # from the list.
904 #
915 #
905 # It is possible that more than one head will have the same branch tag.
916 # It is possible that more than one head will have the same branch tag.
906 # callers need to check the result for multiple heads under the same
917 # callers need to check the result for multiple heads under the same
907 # branch tag if that is a problem for them (ie checkout of a specific
918 # branch tag if that is a problem for them (ie checkout of a specific
908 # branch).
919 # branch).
909 #
920 #
910 # passing in a specific branch will limit the depth of the search
921 # passing in a specific branch will limit the depth of the search
911 # through the parents. It won't limit the branches returned in the
922 # through the parents. It won't limit the branches returned in the
912 # result though.
923 # result though.
913 def branchlookup(self, heads=None, branch=None):
924 def branchlookup(self, heads=None, branch=None):
914 if not heads:
925 if not heads:
915 heads = self.heads()
926 heads = self.heads()
916 headt = [ h for h in heads ]
927 headt = [ h for h in heads ]
917 chlog = self.changelog
928 chlog = self.changelog
918 branches = {}
929 branches = {}
919 merges = []
930 merges = []
920 seenmerge = {}
931 seenmerge = {}
921
932
922 # traverse the tree once for each head, recording in the branches
933 # traverse the tree once for each head, recording in the branches
923 # dict which tags are visible from this head. The branches
934 # dict which tags are visible from this head. The branches
924 # dict also records which tags are visible from each tag
935 # dict also records which tags are visible from each tag
925 # while we traverse.
936 # while we traverse.
926 while headt or merges:
937 while headt or merges:
927 if merges:
938 if merges:
928 n, found = merges.pop()
939 n, found = merges.pop()
929 visit = [n]
940 visit = [n]
930 else:
941 else:
931 h = headt.pop()
942 h = headt.pop()
932 visit = [h]
943 visit = [h]
933 found = [h]
944 found = [h]
934 seen = {}
945 seen = {}
935 while visit:
946 while visit:
936 n = visit.pop()
947 n = visit.pop()
937 if n in seen:
948 if n in seen:
938 continue
949 continue
939 pp = chlog.parents(n)
950 pp = chlog.parents(n)
940 tags = self.nodetags(n)
951 tags = self.nodetags(n)
941 if tags:
952 if tags:
942 for x in tags:
953 for x in tags:
943 if x == 'tip':
954 if x == 'tip':
944 continue
955 continue
945 for f in found:
956 for f in found:
946 branches.setdefault(f, {})[n] = 1
957 branches.setdefault(f, {})[n] = 1
947 branches.setdefault(n, {})[n] = 1
958 branches.setdefault(n, {})[n] = 1
948 break
959 break
949 if n not in found:
960 if n not in found:
950 found.append(n)
961 found.append(n)
951 if branch in tags:
962 if branch in tags:
952 continue
963 continue
953 seen[n] = 1
964 seen[n] = 1
954 if pp[1] != nullid and n not in seenmerge:
965 if pp[1] != nullid and n not in seenmerge:
955 merges.append((pp[1], [x for x in found]))
966 merges.append((pp[1], [x for x in found]))
956 seenmerge[n] = 1
967 seenmerge[n] = 1
957 if pp[0] != nullid:
968 if pp[0] != nullid:
958 visit.append(pp[0])
969 visit.append(pp[0])
959 # traverse the branches dict, eliminating branch tags from each
970 # traverse the branches dict, eliminating branch tags from each
960 # head that are visible from another branch tag for that head.
971 # head that are visible from another branch tag for that head.
961 out = {}
972 out = {}
962 viscache = {}
973 viscache = {}
963 for h in heads:
974 for h in heads:
964 def visible(node):
975 def visible(node):
965 if node in viscache:
976 if node in viscache:
966 return viscache[node]
977 return viscache[node]
967 ret = {}
978 ret = {}
968 visit = [node]
979 visit = [node]
969 while visit:
980 while visit:
970 x = visit.pop()
981 x = visit.pop()
971 if x in viscache:
982 if x in viscache:
972 ret.update(viscache[x])
983 ret.update(viscache[x])
973 elif x not in ret:
984 elif x not in ret:
974 ret[x] = 1
985 ret[x] = 1
975 if x in branches:
986 if x in branches:
976 visit[len(visit):] = branches[x].keys()
987 visit[len(visit):] = branches[x].keys()
977 viscache[node] = ret
988 viscache[node] = ret
978 return ret
989 return ret
979 if h not in branches:
990 if h not in branches:
980 continue
991 continue
981 # O(n^2), but somewhat limited. This only searches the
992 # O(n^2), but somewhat limited. This only searches the
982 # tags visible from a specific head, not all the tags in the
993 # tags visible from a specific head, not all the tags in the
983 # whole repo.
994 # whole repo.
984 for b in branches[h]:
995 for b in branches[h]:
985 vis = False
996 vis = False
986 for bb in branches[h].keys():
997 for bb in branches[h].keys():
987 if b != bb:
998 if b != bb:
988 if b in visible(bb):
999 if b in visible(bb):
989 vis = True
1000 vis = True
990 break
1001 break
991 if not vis:
1002 if not vis:
992 l = out.setdefault(h, [])
1003 l = out.setdefault(h, [])
993 l[len(l):] = self.nodetags(b)
1004 l[len(l):] = self.nodetags(b)
994 return out
1005 return out
995
1006
996 def branches(self, nodes):
1007 def branches(self, nodes):
997 if not nodes:
1008 if not nodes:
998 nodes = [self.changelog.tip()]
1009 nodes = [self.changelog.tip()]
999 b = []
1010 b = []
1000 for n in nodes:
1011 for n in nodes:
1001 t = n
1012 t = n
1002 while 1:
1013 while 1:
1003 p = self.changelog.parents(n)
1014 p = self.changelog.parents(n)
1004 if p[1] != nullid or p[0] == nullid:
1015 if p[1] != nullid or p[0] == nullid:
1005 b.append((t, n, p[0], p[1]))
1016 b.append((t, n, p[0], p[1]))
1006 break
1017 break
1007 n = p[0]
1018 n = p[0]
1008 return b
1019 return b
1009
1020
1010 def between(self, pairs):
1021 def between(self, pairs):
1011 r = []
1022 r = []
1012
1023
1013 for top, bottom in pairs:
1024 for top, bottom in pairs:
1014 n, l, i = top, [], 0
1025 n, l, i = top, [], 0
1015 f = 1
1026 f = 1
1016
1027
1017 while n != bottom:
1028 while n != bottom:
1018 p = self.changelog.parents(n)[0]
1029 p = self.changelog.parents(n)[0]
1019 if i == f:
1030 if i == f:
1020 l.append(n)
1031 l.append(n)
1021 f = f * 2
1032 f = f * 2
1022 n = p
1033 n = p
1023 i += 1
1034 i += 1
1024
1035
1025 r.append(l)
1036 r.append(l)
1026
1037
1027 return r
1038 return r
1028
1039
1029 def findincoming(self, remote, base=None, heads=None, force=False):
1040 def findincoming(self, remote, base=None, heads=None, force=False):
1030 """Return list of roots of the subsets of missing nodes from remote
1041 """Return list of roots of the subsets of missing nodes from remote
1031
1042
1032 If base dict is specified, assume that these nodes and their parents
1043 If base dict is specified, assume that these nodes and their parents
1033 exist on the remote side and that no child of a node of base exists
1044 exist on the remote side and that no child of a node of base exists
1034 in both remote and self.
1045 in both remote and self.
1035 Furthermore base will be updated to include the nodes that exists
1046 Furthermore base will be updated to include the nodes that exists
1036 in self and remote but no children exists in self and remote.
1047 in self and remote but no children exists in self and remote.
1037 If a list of heads is specified, return only nodes which are heads
1048 If a list of heads is specified, return only nodes which are heads
1038 or ancestors of these heads.
1049 or ancestors of these heads.
1039
1050
1040 All the ancestors of base are in self and in remote.
1051 All the ancestors of base are in self and in remote.
1041 All the descendants of the list returned are missing in self.
1052 All the descendants of the list returned are missing in self.
1042 (and so we know that the rest of the nodes are missing in remote, see
1053 (and so we know that the rest of the nodes are missing in remote, see
1043 outgoing)
1054 outgoing)
1044 """
1055 """
1045 m = self.changelog.nodemap
1056 m = self.changelog.nodemap
1046 search = []
1057 search = []
1047 fetch = {}
1058 fetch = {}
1048 seen = {}
1059 seen = {}
1049 seenbranch = {}
1060 seenbranch = {}
1050 if base == None:
1061 if base == None:
1051 base = {}
1062 base = {}
1052
1063
1053 if not heads:
1064 if not heads:
1054 heads = remote.heads()
1065 heads = remote.heads()
1055
1066
1056 if self.changelog.tip() == nullid:
1067 if self.changelog.tip() == nullid:
1057 base[nullid] = 1
1068 base[nullid] = 1
1058 if heads != [nullid]:
1069 if heads != [nullid]:
1059 return [nullid]
1070 return [nullid]
1060 return []
1071 return []
1061
1072
1062 # assume we're closer to the tip than the root
1073 # assume we're closer to the tip than the root
1063 # and start by examining the heads
1074 # and start by examining the heads
1064 self.ui.status(_("searching for changes\n"))
1075 self.ui.status(_("searching for changes\n"))
1065
1076
1066 unknown = []
1077 unknown = []
1067 for h in heads:
1078 for h in heads:
1068 if h not in m:
1079 if h not in m:
1069 unknown.append(h)
1080 unknown.append(h)
1070 else:
1081 else:
1071 base[h] = 1
1082 base[h] = 1
1072
1083
1073 if not unknown:
1084 if not unknown:
1074 return []
1085 return []
1075
1086
1076 req = dict.fromkeys(unknown)
1087 req = dict.fromkeys(unknown)
1077 reqcnt = 0
1088 reqcnt = 0
1078
1089
1079 # search through remote branches
1090 # search through remote branches
1080 # a 'branch' here is a linear segment of history, with four parts:
1091 # a 'branch' here is a linear segment of history, with four parts:
1081 # head, root, first parent, second parent
1092 # head, root, first parent, second parent
1082 # (a branch always has two parents (or none) by definition)
1093 # (a branch always has two parents (or none) by definition)
1083 unknown = remote.branches(unknown)
1094 unknown = remote.branches(unknown)
1084 while unknown:
1095 while unknown:
1085 r = []
1096 r = []
1086 while unknown:
1097 while unknown:
1087 n = unknown.pop(0)
1098 n = unknown.pop(0)
1088 if n[0] in seen:
1099 if n[0] in seen:
1089 continue
1100 continue
1090
1101
1091 self.ui.debug(_("examining %s:%s\n")
1102 self.ui.debug(_("examining %s:%s\n")
1092 % (short(n[0]), short(n[1])))
1103 % (short(n[0]), short(n[1])))
1093 if n[0] == nullid: # found the end of the branch
1104 if n[0] == nullid: # found the end of the branch
1094 pass
1105 pass
1095 elif n in seenbranch:
1106 elif n in seenbranch:
1096 self.ui.debug(_("branch already found\n"))
1107 self.ui.debug(_("branch already found\n"))
1097 continue
1108 continue
1098 elif n[1] and n[1] in m: # do we know the base?
1109 elif n[1] and n[1] in m: # do we know the base?
1099 self.ui.debug(_("found incomplete branch %s:%s\n")
1110 self.ui.debug(_("found incomplete branch %s:%s\n")
1100 % (short(n[0]), short(n[1])))
1111 % (short(n[0]), short(n[1])))
1101 search.append(n) # schedule branch range for scanning
1112 search.append(n) # schedule branch range for scanning
1102 seenbranch[n] = 1
1113 seenbranch[n] = 1
1103 else:
1114 else:
1104 if n[1] not in seen and n[1] not in fetch:
1115 if n[1] not in seen and n[1] not in fetch:
1105 if n[2] in m and n[3] in m:
1116 if n[2] in m and n[3] in m:
1106 self.ui.debug(_("found new changeset %s\n") %
1117 self.ui.debug(_("found new changeset %s\n") %
1107 short(n[1]))
1118 short(n[1]))
1108 fetch[n[1]] = 1 # earliest unknown
1119 fetch[n[1]] = 1 # earliest unknown
1109 for p in n[2:4]:
1120 for p in n[2:4]:
1110 if p in m:
1121 if p in m:
1111 base[p] = 1 # latest known
1122 base[p] = 1 # latest known
1112
1123
1113 for p in n[2:4]:
1124 for p in n[2:4]:
1114 if p not in req and p not in m:
1125 if p not in req and p not in m:
1115 r.append(p)
1126 r.append(p)
1116 req[p] = 1
1127 req[p] = 1
1117 seen[n[0]] = 1
1128 seen[n[0]] = 1
1118
1129
1119 if r:
1130 if r:
1120 reqcnt += 1
1131 reqcnt += 1
1121 self.ui.debug(_("request %d: %s\n") %
1132 self.ui.debug(_("request %d: %s\n") %
1122 (reqcnt, " ".join(map(short, r))))
1133 (reqcnt, " ".join(map(short, r))))
1123 for p in range(0, len(r), 10):
1134 for p in range(0, len(r), 10):
1124 for b in remote.branches(r[p:p+10]):
1135 for b in remote.branches(r[p:p+10]):
1125 self.ui.debug(_("received %s:%s\n") %
1136 self.ui.debug(_("received %s:%s\n") %
1126 (short(b[0]), short(b[1])))
1137 (short(b[0]), short(b[1])))
1127 unknown.append(b)
1138 unknown.append(b)
1128
1139
1129 # do binary search on the branches we found
1140 # do binary search on the branches we found
1130 while search:
1141 while search:
1131 n = search.pop(0)
1142 n = search.pop(0)
1132 reqcnt += 1
1143 reqcnt += 1
1133 l = remote.between([(n[0], n[1])])[0]
1144 l = remote.between([(n[0], n[1])])[0]
1134 l.append(n[1])
1145 l.append(n[1])
1135 p = n[0]
1146 p = n[0]
1136 f = 1
1147 f = 1
1137 for i in l:
1148 for i in l:
1138 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1149 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1139 if i in m:
1150 if i in m:
1140 if f <= 2:
1151 if f <= 2:
1141 self.ui.debug(_("found new branch changeset %s\n") %
1152 self.ui.debug(_("found new branch changeset %s\n") %
1142 short(p))
1153 short(p))
1143 fetch[p] = 1
1154 fetch[p] = 1
1144 base[i] = 1
1155 base[i] = 1
1145 else:
1156 else:
1146 self.ui.debug(_("narrowed branch search to %s:%s\n")
1157 self.ui.debug(_("narrowed branch search to %s:%s\n")
1147 % (short(p), short(i)))
1158 % (short(p), short(i)))
1148 search.append((p, i))
1159 search.append((p, i))
1149 break
1160 break
1150 p, f = i, f * 2
1161 p, f = i, f * 2
1151
1162
1152 # sanity check our fetch list
1163 # sanity check our fetch list
1153 for f in fetch.keys():
1164 for f in fetch.keys():
1154 if f in m:
1165 if f in m:
1155 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1166 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1156
1167
1157 if base.keys() == [nullid]:
1168 if base.keys() == [nullid]:
1158 if force:
1169 if force:
1159 self.ui.warn(_("warning: repository is unrelated\n"))
1170 self.ui.warn(_("warning: repository is unrelated\n"))
1160 else:
1171 else:
1161 raise util.Abort(_("repository is unrelated"))
1172 raise util.Abort(_("repository is unrelated"))
1162
1173
1163 self.ui.debug(_("found new changesets starting at ") +
1174 self.ui.debug(_("found new changesets starting at ") +
1164 " ".join([short(f) for f in fetch]) + "\n")
1175 " ".join([short(f) for f in fetch]) + "\n")
1165
1176
1166 self.ui.debug(_("%d total queries\n") % reqcnt)
1177 self.ui.debug(_("%d total queries\n") % reqcnt)
1167
1178
1168 return fetch.keys()
1179 return fetch.keys()
1169
1180
1170 def findoutgoing(self, remote, base=None, heads=None, force=False):
1181 def findoutgoing(self, remote, base=None, heads=None, force=False):
1171 """Return list of nodes that are roots of subsets not in remote
1182 """Return list of nodes that are roots of subsets not in remote
1172
1183
1173 If base dict is specified, assume that these nodes and their parents
1184 If base dict is specified, assume that these nodes and their parents
1174 exist on the remote side.
1185 exist on the remote side.
1175 If a list of heads is specified, return only nodes which are heads
1186 If a list of heads is specified, return only nodes which are heads
1176 or ancestors of these heads, and return a second element which
1187 or ancestors of these heads, and return a second element which
1177 contains all remote heads which get new children.
1188 contains all remote heads which get new children.
1178 """
1189 """
1179 if base == None:
1190 if base == None:
1180 base = {}
1191 base = {}
1181 self.findincoming(remote, base, heads, force=force)
1192 self.findincoming(remote, base, heads, force=force)
1182
1193
1183 self.ui.debug(_("common changesets up to ")
1194 self.ui.debug(_("common changesets up to ")
1184 + " ".join(map(short, base.keys())) + "\n")
1195 + " ".join(map(short, base.keys())) + "\n")
1185
1196
1186 remain = dict.fromkeys(self.changelog.nodemap)
1197 remain = dict.fromkeys(self.changelog.nodemap)
1187
1198
1188 # prune everything remote has from the tree
1199 # prune everything remote has from the tree
1189 del remain[nullid]
1200 del remain[nullid]
1190 remove = base.keys()
1201 remove = base.keys()
1191 while remove:
1202 while remove:
1192 n = remove.pop(0)
1203 n = remove.pop(0)
1193 if n in remain:
1204 if n in remain:
1194 del remain[n]
1205 del remain[n]
1195 for p in self.changelog.parents(n):
1206 for p in self.changelog.parents(n):
1196 remove.append(p)
1207 remove.append(p)
1197
1208
1198 # find every node whose parents have been pruned
1209 # find every node whose parents have been pruned
1199 subset = []
1210 subset = []
1200 # find every remote head that will get new children
1211 # find every remote head that will get new children
1201 updated_heads = {}
1212 updated_heads = {}
1202 for n in remain:
1213 for n in remain:
1203 p1, p2 = self.changelog.parents(n)
1214 p1, p2 = self.changelog.parents(n)
1204 if p1 not in remain and p2 not in remain:
1215 if p1 not in remain and p2 not in remain:
1205 subset.append(n)
1216 subset.append(n)
1206 if heads:
1217 if heads:
1207 if p1 in heads:
1218 if p1 in heads:
1208 updated_heads[p1] = True
1219 updated_heads[p1] = True
1209 if p2 in heads:
1220 if p2 in heads:
1210 updated_heads[p2] = True
1221 updated_heads[p2] = True
1211
1222
1212 # this is the set of all roots we have to push
1223 # this is the set of all roots we have to push
1213 if heads:
1224 if heads:
1214 return subset, updated_heads.keys()
1225 return subset, updated_heads.keys()
1215 else:
1226 else:
1216 return subset
1227 return subset
1217
1228
1218 def pull(self, remote, heads=None, force=False, lock=None):
1229 def pull(self, remote, heads=None, force=False, lock=None):
1219 mylock = False
1230 mylock = False
1220 if not lock:
1231 if not lock:
1221 lock = self.lock()
1232 lock = self.lock()
1222 mylock = True
1233 mylock = True
1223
1234
1224 try:
1235 try:
1225 fetch = self.findincoming(remote, force=force)
1236 fetch = self.findincoming(remote, force=force)
1226 if fetch == [nullid]:
1237 if fetch == [nullid]:
1227 self.ui.status(_("requesting all changes\n"))
1238 self.ui.status(_("requesting all changes\n"))
1228
1239
1229 if not fetch:
1240 if not fetch:
1230 self.ui.status(_("no changes found\n"))
1241 self.ui.status(_("no changes found\n"))
1231 return 0
1242 return 0
1232
1243
1233 if heads is None:
1244 if heads is None:
1234 cg = remote.changegroup(fetch, 'pull')
1245 cg = remote.changegroup(fetch, 'pull')
1235 else:
1246 else:
1236 cg = remote.changegroupsubset(fetch, heads, 'pull')
1247 cg = remote.changegroupsubset(fetch, heads, 'pull')
1237 return self.addchangegroup(cg, 'pull', remote.url())
1248 return self.addchangegroup(cg, 'pull', remote.url())
1238 finally:
1249 finally:
1239 if mylock:
1250 if mylock:
1240 lock.release()
1251 lock.release()
1241
1252
1242 def push(self, remote, force=False, revs=None):
1253 def push(self, remote, force=False, revs=None):
1243 # there are two ways to push to remote repo:
1254 # there are two ways to push to remote repo:
1244 #
1255 #
1245 # addchangegroup assumes local user can lock remote
1256 # addchangegroup assumes local user can lock remote
1246 # repo (local filesystem, old ssh servers).
1257 # repo (local filesystem, old ssh servers).
1247 #
1258 #
1248 # unbundle assumes local user cannot lock remote repo (new ssh
1259 # unbundle assumes local user cannot lock remote repo (new ssh
1249 # servers, http servers).
1260 # servers, http servers).
1250
1261
1251 if remote.capable('unbundle'):
1262 if remote.capable('unbundle'):
1252 return self.push_unbundle(remote, force, revs)
1263 return self.push_unbundle(remote, force, revs)
1253 return self.push_addchangegroup(remote, force, revs)
1264 return self.push_addchangegroup(remote, force, revs)
1254
1265
1255 def prepush(self, remote, force, revs):
1266 def prepush(self, remote, force, revs):
1256 base = {}
1267 base = {}
1257 remote_heads = remote.heads()
1268 remote_heads = remote.heads()
1258 inc = self.findincoming(remote, base, remote_heads, force=force)
1269 inc = self.findincoming(remote, base, remote_heads, force=force)
1259 if not force and inc:
1270 if not force and inc:
1260 self.ui.warn(_("abort: unsynced remote changes!\n"))
1271 self.ui.warn(_("abort: unsynced remote changes!\n"))
1261 self.ui.status(_("(did you forget to sync?"
1272 self.ui.status(_("(did you forget to sync?"
1262 " use push -f to force)\n"))
1273 " use push -f to force)\n"))
1263 return None, 1
1274 return None, 1
1264
1275
1265 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1276 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1266 if revs is not None:
1277 if revs is not None:
1267 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1278 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1268 else:
1279 else:
1269 bases, heads = update, self.changelog.heads()
1280 bases, heads = update, self.changelog.heads()
1270
1281
1271 if not bases:
1282 if not bases:
1272 self.ui.status(_("no changes found\n"))
1283 self.ui.status(_("no changes found\n"))
1273 return None, 1
1284 return None, 1
1274 elif not force:
1285 elif not force:
1275 # FIXME we don't properly detect creation of new heads
1286 # FIXME we don't properly detect creation of new heads
1276 # in the push -r case, assume the user knows what he's doing
1287 # in the push -r case, assume the user knows what he's doing
1277 if not revs and len(remote_heads) < len(heads) \
1288 if not revs and len(remote_heads) < len(heads) \
1278 and remote_heads != [nullid]:
1289 and remote_heads != [nullid]:
1279 self.ui.warn(_("abort: push creates new remote branches!\n"))
1290 self.ui.warn(_("abort: push creates new remote branches!\n"))
1280 self.ui.status(_("(did you forget to merge?"
1291 self.ui.status(_("(did you forget to merge?"
1281 " use push -f to force)\n"))
1292 " use push -f to force)\n"))
1282 return None, 1
1293 return None, 1
1283
1294
1284 if revs is None:
1295 if revs is None:
1285 cg = self.changegroup(update, 'push')
1296 cg = self.changegroup(update, 'push')
1286 else:
1297 else:
1287 cg = self.changegroupsubset(update, revs, 'push')
1298 cg = self.changegroupsubset(update, revs, 'push')
1288 return cg, remote_heads
1299 return cg, remote_heads
1289
1300
1290 def push_addchangegroup(self, remote, force, revs):
1301 def push_addchangegroup(self, remote, force, revs):
1291 lock = remote.lock()
1302 lock = remote.lock()
1292
1303
1293 ret = self.prepush(remote, force, revs)
1304 ret = self.prepush(remote, force, revs)
1294 if ret[0] is not None:
1305 if ret[0] is not None:
1295 cg, remote_heads = ret
1306 cg, remote_heads = ret
1296 return remote.addchangegroup(cg, 'push', self.url())
1307 return remote.addchangegroup(cg, 'push', self.url())
1297 return ret[1]
1308 return ret[1]
1298
1309
1299 def push_unbundle(self, remote, force, revs):
1310 def push_unbundle(self, remote, force, revs):
1300 # local repo finds heads on server, finds out what revs it
1311 # local repo finds heads on server, finds out what revs it
1301 # must push. once revs transferred, if server finds it has
1312 # must push. once revs transferred, if server finds it has
1302 # different heads (someone else won commit/push race), server
1313 # different heads (someone else won commit/push race), server
1303 # aborts.
1314 # aborts.
1304
1315
1305 ret = self.prepush(remote, force, revs)
1316 ret = self.prepush(remote, force, revs)
1306 if ret[0] is not None:
1317 if ret[0] is not None:
1307 cg, remote_heads = ret
1318 cg, remote_heads = ret
1308 if force: remote_heads = ['force']
1319 if force: remote_heads = ['force']
1309 return remote.unbundle(cg, remote_heads, 'push')
1320 return remote.unbundle(cg, remote_heads, 'push')
1310 return ret[1]
1321 return ret[1]
1311
1322
1312 def changegroupsubset(self, bases, heads, source):
1323 def changegroupsubset(self, bases, heads, source):
1313 """This function generates a changegroup consisting of all the nodes
1324 """This function generates a changegroup consisting of all the nodes
1314 that are descendents of any of the bases, and ancestors of any of
1325 that are descendents of any of the bases, and ancestors of any of
1315 the heads.
1326 the heads.
1316
1327
1317 It is fairly complex as determining which filenodes and which
1328 It is fairly complex as determining which filenodes and which
1318 manifest nodes need to be included for the changeset to be complete
1329 manifest nodes need to be included for the changeset to be complete
1319 is non-trivial.
1330 is non-trivial.
1320
1331
1321 Another wrinkle is doing the reverse, figuring out which changeset in
1332 Another wrinkle is doing the reverse, figuring out which changeset in
1322 the changegroup a particular filenode or manifestnode belongs to."""
1333 the changegroup a particular filenode or manifestnode belongs to."""
1323
1334
1324 self.hook('preoutgoing', throw=True, source=source)
1335 self.hook('preoutgoing', throw=True, source=source)
1325
1336
1326 # Set up some initial variables
1337 # Set up some initial variables
1327 # Make it easy to refer to self.changelog
1338 # Make it easy to refer to self.changelog
1328 cl = self.changelog
1339 cl = self.changelog
1329 # msng is short for missing - compute the list of changesets in this
1340 # msng is short for missing - compute the list of changesets in this
1330 # changegroup.
1341 # changegroup.
1331 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1342 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1332 # Some bases may turn out to be superfluous, and some heads may be
1343 # Some bases may turn out to be superfluous, and some heads may be
1333 # too. nodesbetween will return the minimal set of bases and heads
1344 # too. nodesbetween will return the minimal set of bases and heads
1334 # necessary to re-create the changegroup.
1345 # necessary to re-create the changegroup.
1335
1346
1336 # Known heads are the list of heads that it is assumed the recipient
1347 # Known heads are the list of heads that it is assumed the recipient
1337 # of this changegroup will know about.
1348 # of this changegroup will know about.
1338 knownheads = {}
1349 knownheads = {}
1339 # We assume that all parents of bases are known heads.
1350 # We assume that all parents of bases are known heads.
1340 for n in bases:
1351 for n in bases:
1341 for p in cl.parents(n):
1352 for p in cl.parents(n):
1342 if p != nullid:
1353 if p != nullid:
1343 knownheads[p] = 1
1354 knownheads[p] = 1
1344 knownheads = knownheads.keys()
1355 knownheads = knownheads.keys()
1345 if knownheads:
1356 if knownheads:
1346 # Now that we know what heads are known, we can compute which
1357 # Now that we know what heads are known, we can compute which
1347 # changesets are known. The recipient must know about all
1358 # changesets are known. The recipient must know about all
1348 # changesets required to reach the known heads from the null
1359 # changesets required to reach the known heads from the null
1349 # changeset.
1360 # changeset.
1350 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1361 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1351 junk = None
1362 junk = None
1352 # Transform the list into an ersatz set.
1363 # Transform the list into an ersatz set.
1353 has_cl_set = dict.fromkeys(has_cl_set)
1364 has_cl_set = dict.fromkeys(has_cl_set)
1354 else:
1365 else:
1355 # If there were no known heads, the recipient cannot be assumed to
1366 # If there were no known heads, the recipient cannot be assumed to
1356 # know about any changesets.
1367 # know about any changesets.
1357 has_cl_set = {}
1368 has_cl_set = {}
1358
1369
1359 # Make it easy to refer to self.manifest
1370 # Make it easy to refer to self.manifest
1360 mnfst = self.manifest
1371 mnfst = self.manifest
1361 # We don't know which manifests are missing yet
1372 # We don't know which manifests are missing yet
1362 msng_mnfst_set = {}
1373 msng_mnfst_set = {}
1363 # Nor do we know which filenodes are missing.
1374 # Nor do we know which filenodes are missing.
1364 msng_filenode_set = {}
1375 msng_filenode_set = {}
1365
1376
1366 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1377 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1367 junk = None
1378 junk = None
1368
1379
1369 # A changeset always belongs to itself, so the changenode lookup
1380 # A changeset always belongs to itself, so the changenode lookup
1370 # function for a changenode is identity.
1381 # function for a changenode is identity.
1371 def identity(x):
1382 def identity(x):
1372 return x
1383 return x
1373
1384
1374 # A function generating function. Sets up an environment for the
1385 # A function generating function. Sets up an environment for the
1375 # inner function.
1386 # inner function.
1376 def cmp_by_rev_func(revlog):
1387 def cmp_by_rev_func(revlog):
1377 # Compare two nodes by their revision number in the environment's
1388 # Compare two nodes by their revision number in the environment's
1378 # revision history. Since the revision number both represents the
1389 # revision history. Since the revision number both represents the
1379 # most efficient order to read the nodes in, and represents a
1390 # most efficient order to read the nodes in, and represents a
1380 # topological sorting of the nodes, this function is often useful.
1391 # topological sorting of the nodes, this function is often useful.
1381 def cmp_by_rev(a, b):
1392 def cmp_by_rev(a, b):
1382 return cmp(revlog.rev(a), revlog.rev(b))
1393 return cmp(revlog.rev(a), revlog.rev(b))
1383 return cmp_by_rev
1394 return cmp_by_rev
1384
1395
1385 # If we determine that a particular file or manifest node must be a
1396 # If we determine that a particular file or manifest node must be a
1386 # node that the recipient of the changegroup will already have, we can
1397 # node that the recipient of the changegroup will already have, we can
1387 # also assume the recipient will have all the parents. This function
1398 # also assume the recipient will have all the parents. This function
1388 # prunes them from the set of missing nodes.
1399 # prunes them from the set of missing nodes.
1389 def prune_parents(revlog, hasset, msngset):
1400 def prune_parents(revlog, hasset, msngset):
1390 haslst = hasset.keys()
1401 haslst = hasset.keys()
1391 haslst.sort(cmp_by_rev_func(revlog))
1402 haslst.sort(cmp_by_rev_func(revlog))
1392 for node in haslst:
1403 for node in haslst:
1393 parentlst = [p for p in revlog.parents(node) if p != nullid]
1404 parentlst = [p for p in revlog.parents(node) if p != nullid]
1394 while parentlst:
1405 while parentlst:
1395 n = parentlst.pop()
1406 n = parentlst.pop()
1396 if n not in hasset:
1407 if n not in hasset:
1397 hasset[n] = 1
1408 hasset[n] = 1
1398 p = [p for p in revlog.parents(n) if p != nullid]
1409 p = [p for p in revlog.parents(n) if p != nullid]
1399 parentlst.extend(p)
1410 parentlst.extend(p)
1400 for n in hasset:
1411 for n in hasset:
1401 msngset.pop(n, None)
1412 msngset.pop(n, None)
1402
1413
1403 # This is a function generating function used to set up an environment
1414 # This is a function generating function used to set up an environment
1404 # for the inner function to execute in.
1415 # for the inner function to execute in.
1405 def manifest_and_file_collector(changedfileset):
1416 def manifest_and_file_collector(changedfileset):
1406 # This is an information gathering function that gathers
1417 # This is an information gathering function that gathers
1407 # information from each changeset node that goes out as part of
1418 # information from each changeset node that goes out as part of
1408 # the changegroup. The information gathered is a list of which
1419 # the changegroup. The information gathered is a list of which
1409 # manifest nodes are potentially required (the recipient may
1420 # manifest nodes are potentially required (the recipient may
1410 # already have them) and total list of all files which were
1421 # already have them) and total list of all files which were
1411 # changed in any changeset in the changegroup.
1422 # changed in any changeset in the changegroup.
1412 #
1423 #
1413 # We also remember the first changenode we saw any manifest
1424 # We also remember the first changenode we saw any manifest
1414 # referenced by so we can later determine which changenode 'owns'
1425 # referenced by so we can later determine which changenode 'owns'
1415 # the manifest.
1426 # the manifest.
1416 def collect_manifests_and_files(clnode):
1427 def collect_manifests_and_files(clnode):
1417 c = cl.read(clnode)
1428 c = cl.read(clnode)
1418 for f in c[3]:
1429 for f in c[3]:
1419 # This is to make sure we only have one instance of each
1430 # This is to make sure we only have one instance of each
1420 # filename string for each filename.
1431 # filename string for each filename.
1421 changedfileset.setdefault(f, f)
1432 changedfileset.setdefault(f, f)
1422 msng_mnfst_set.setdefault(c[0], clnode)
1433 msng_mnfst_set.setdefault(c[0], clnode)
1423 return collect_manifests_and_files
1434 return collect_manifests_and_files
1424
1435
1425 # Figure out which manifest nodes (of the ones we think might be part
1436 # Figure out which manifest nodes (of the ones we think might be part
1426 # of the changegroup) the recipient must know about and remove them
1437 # of the changegroup) the recipient must know about and remove them
1427 # from the changegroup.
1438 # from the changegroup.
1428 def prune_manifests():
1439 def prune_manifests():
1429 has_mnfst_set = {}
1440 has_mnfst_set = {}
1430 for n in msng_mnfst_set:
1441 for n in msng_mnfst_set:
1431 # If a 'missing' manifest thinks it belongs to a changenode
1442 # If a 'missing' manifest thinks it belongs to a changenode
1432 # the recipient is assumed to have, obviously the recipient
1443 # the recipient is assumed to have, obviously the recipient
1433 # must have that manifest.
1444 # must have that manifest.
1434 linknode = cl.node(mnfst.linkrev(n))
1445 linknode = cl.node(mnfst.linkrev(n))
1435 if linknode in has_cl_set:
1446 if linknode in has_cl_set:
1436 has_mnfst_set[n] = 1
1447 has_mnfst_set[n] = 1
1437 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1448 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1438
1449
1439 # Use the information collected in collect_manifests_and_files to say
1450 # Use the information collected in collect_manifests_and_files to say
1440 # which changenode any manifestnode belongs to.
1451 # which changenode any manifestnode belongs to.
1441 def lookup_manifest_link(mnfstnode):
1452 def lookup_manifest_link(mnfstnode):
1442 return msng_mnfst_set[mnfstnode]
1453 return msng_mnfst_set[mnfstnode]
1443
1454
1444 # A function generating function that sets up the initial environment
1455 # A function generating function that sets up the initial environment
1445 # the inner function.
1456 # the inner function.
1446 def filenode_collector(changedfiles):
1457 def filenode_collector(changedfiles):
1447 next_rev = [0]
1458 next_rev = [0]
1448 # This gathers information from each manifestnode included in the
1459 # This gathers information from each manifestnode included in the
1449 # changegroup about which filenodes the manifest node references
1460 # changegroup about which filenodes the manifest node references
1450 # so we can include those in the changegroup too.
1461 # so we can include those in the changegroup too.
1451 #
1462 #
1452 # It also remembers which changenode each filenode belongs to. It
1463 # It also remembers which changenode each filenode belongs to. It
1453 # does this by assuming the a filenode belongs to the changenode
1464 # does this by assuming the a filenode belongs to the changenode
1454 # the first manifest that references it belongs to.
1465 # the first manifest that references it belongs to.
1455 def collect_msng_filenodes(mnfstnode):
1466 def collect_msng_filenodes(mnfstnode):
1456 r = mnfst.rev(mnfstnode)
1467 r = mnfst.rev(mnfstnode)
1457 if r == next_rev[0]:
1468 if r == next_rev[0]:
1458 # If the last rev we looked at was the one just previous,
1469 # If the last rev we looked at was the one just previous,
1459 # we only need to see a diff.
1470 # we only need to see a diff.
1460 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1471 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1461 # For each line in the delta
1472 # For each line in the delta
1462 for dline in delta.splitlines():
1473 for dline in delta.splitlines():
1463 # get the filename and filenode for that line
1474 # get the filename and filenode for that line
1464 f, fnode = dline.split('\0')
1475 f, fnode = dline.split('\0')
1465 fnode = bin(fnode[:40])
1476 fnode = bin(fnode[:40])
1466 f = changedfiles.get(f, None)
1477 f = changedfiles.get(f, None)
1467 # And if the file is in the list of files we care
1478 # And if the file is in the list of files we care
1468 # about.
1479 # about.
1469 if f is not None:
1480 if f is not None:
1470 # Get the changenode this manifest belongs to
1481 # Get the changenode this manifest belongs to
1471 clnode = msng_mnfst_set[mnfstnode]
1482 clnode = msng_mnfst_set[mnfstnode]
1472 # Create the set of filenodes for the file if
1483 # Create the set of filenodes for the file if
1473 # there isn't one already.
1484 # there isn't one already.
1474 ndset = msng_filenode_set.setdefault(f, {})
1485 ndset = msng_filenode_set.setdefault(f, {})
1475 # And set the filenode's changelog node to the
1486 # And set the filenode's changelog node to the
1476 # manifest's if it hasn't been set already.
1487 # manifest's if it hasn't been set already.
1477 ndset.setdefault(fnode, clnode)
1488 ndset.setdefault(fnode, clnode)
1478 else:
1489 else:
1479 # Otherwise we need a full manifest.
1490 # Otherwise we need a full manifest.
1480 m = mnfst.read(mnfstnode)
1491 m = mnfst.read(mnfstnode)
1481 # For every file in we care about.
1492 # For every file in we care about.
1482 for f in changedfiles:
1493 for f in changedfiles:
1483 fnode = m.get(f, None)
1494 fnode = m.get(f, None)
1484 # If it's in the manifest
1495 # If it's in the manifest
1485 if fnode is not None:
1496 if fnode is not None:
1486 # See comments above.
1497 # See comments above.
1487 clnode = msng_mnfst_set[mnfstnode]
1498 clnode = msng_mnfst_set[mnfstnode]
1488 ndset = msng_filenode_set.setdefault(f, {})
1499 ndset = msng_filenode_set.setdefault(f, {})
1489 ndset.setdefault(fnode, clnode)
1500 ndset.setdefault(fnode, clnode)
1490 # Remember the revision we hope to see next.
1501 # Remember the revision we hope to see next.
1491 next_rev[0] = r + 1
1502 next_rev[0] = r + 1
1492 return collect_msng_filenodes
1503 return collect_msng_filenodes
1493
1504
1494 # We have a list of filenodes we think we need for a file, lets remove
1505 # We have a list of filenodes we think we need for a file, lets remove
1495 # all those we now the recipient must have.
1506 # all those we now the recipient must have.
1496 def prune_filenodes(f, filerevlog):
1507 def prune_filenodes(f, filerevlog):
1497 msngset = msng_filenode_set[f]
1508 msngset = msng_filenode_set[f]
1498 hasset = {}
1509 hasset = {}
1499 # If a 'missing' filenode thinks it belongs to a changenode we
1510 # If a 'missing' filenode thinks it belongs to a changenode we
1500 # assume the recipient must have, then the recipient must have
1511 # assume the recipient must have, then the recipient must have
1501 # that filenode.
1512 # that filenode.
1502 for n in msngset:
1513 for n in msngset:
1503 clnode = cl.node(filerevlog.linkrev(n))
1514 clnode = cl.node(filerevlog.linkrev(n))
1504 if clnode in has_cl_set:
1515 if clnode in has_cl_set:
1505 hasset[n] = 1
1516 hasset[n] = 1
1506 prune_parents(filerevlog, hasset, msngset)
1517 prune_parents(filerevlog, hasset, msngset)
1507
1518
1508 # A function generator function that sets up the a context for the
1519 # A function generator function that sets up the a context for the
1509 # inner function.
1520 # inner function.
1510 def lookup_filenode_link_func(fname):
1521 def lookup_filenode_link_func(fname):
1511 msngset = msng_filenode_set[fname]
1522 msngset = msng_filenode_set[fname]
1512 # Lookup the changenode the filenode belongs to.
1523 # Lookup the changenode the filenode belongs to.
1513 def lookup_filenode_link(fnode):
1524 def lookup_filenode_link(fnode):
1514 return msngset[fnode]
1525 return msngset[fnode]
1515 return lookup_filenode_link
1526 return lookup_filenode_link
1516
1527
1517 # Now that we have all theses utility functions to help out and
1528 # Now that we have all theses utility functions to help out and
1518 # logically divide up the task, generate the group.
1529 # logically divide up the task, generate the group.
1519 def gengroup():
1530 def gengroup():
1520 # The set of changed files starts empty.
1531 # The set of changed files starts empty.
1521 changedfiles = {}
1532 changedfiles = {}
1522 # Create a changenode group generator that will call our functions
1533 # Create a changenode group generator that will call our functions
1523 # back to lookup the owning changenode and collect information.
1534 # back to lookup the owning changenode and collect information.
1524 group = cl.group(msng_cl_lst, identity,
1535 group = cl.group(msng_cl_lst, identity,
1525 manifest_and_file_collector(changedfiles))
1536 manifest_and_file_collector(changedfiles))
1526 for chnk in group:
1537 for chnk in group:
1527 yield chnk
1538 yield chnk
1528
1539
1529 # The list of manifests has been collected by the generator
1540 # The list of manifests has been collected by the generator
1530 # calling our functions back.
1541 # calling our functions back.
1531 prune_manifests()
1542 prune_manifests()
1532 msng_mnfst_lst = msng_mnfst_set.keys()
1543 msng_mnfst_lst = msng_mnfst_set.keys()
1533 # Sort the manifestnodes by revision number.
1544 # Sort the manifestnodes by revision number.
1534 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1545 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1535 # Create a generator for the manifestnodes that calls our lookup
1546 # Create a generator for the manifestnodes that calls our lookup
1536 # and data collection functions back.
1547 # and data collection functions back.
1537 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1548 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1538 filenode_collector(changedfiles))
1549 filenode_collector(changedfiles))
1539 for chnk in group:
1550 for chnk in group:
1540 yield chnk
1551 yield chnk
1541
1552
1542 # These are no longer needed, dereference and toss the memory for
1553 # These are no longer needed, dereference and toss the memory for
1543 # them.
1554 # them.
1544 msng_mnfst_lst = None
1555 msng_mnfst_lst = None
1545 msng_mnfst_set.clear()
1556 msng_mnfst_set.clear()
1546
1557
1547 changedfiles = changedfiles.keys()
1558 changedfiles = changedfiles.keys()
1548 changedfiles.sort()
1559 changedfiles.sort()
1549 # Go through all our files in order sorted by name.
1560 # Go through all our files in order sorted by name.
1550 for fname in changedfiles:
1561 for fname in changedfiles:
1551 filerevlog = self.file(fname)
1562 filerevlog = self.file(fname)
1552 # Toss out the filenodes that the recipient isn't really
1563 # Toss out the filenodes that the recipient isn't really
1553 # missing.
1564 # missing.
1554 if msng_filenode_set.has_key(fname):
1565 if msng_filenode_set.has_key(fname):
1555 prune_filenodes(fname, filerevlog)
1566 prune_filenodes(fname, filerevlog)
1556 msng_filenode_lst = msng_filenode_set[fname].keys()
1567 msng_filenode_lst = msng_filenode_set[fname].keys()
1557 else:
1568 else:
1558 msng_filenode_lst = []
1569 msng_filenode_lst = []
1559 # If any filenodes are left, generate the group for them,
1570 # If any filenodes are left, generate the group for them,
1560 # otherwise don't bother.
1571 # otherwise don't bother.
1561 if len(msng_filenode_lst) > 0:
1572 if len(msng_filenode_lst) > 0:
1562 yield changegroup.genchunk(fname)
1573 yield changegroup.genchunk(fname)
1563 # Sort the filenodes by their revision #
1574 # Sort the filenodes by their revision #
1564 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1575 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1565 # Create a group generator and only pass in a changenode
1576 # Create a group generator and only pass in a changenode
1566 # lookup function as we need to collect no information
1577 # lookup function as we need to collect no information
1567 # from filenodes.
1578 # from filenodes.
1568 group = filerevlog.group(msng_filenode_lst,
1579 group = filerevlog.group(msng_filenode_lst,
1569 lookup_filenode_link_func(fname))
1580 lookup_filenode_link_func(fname))
1570 for chnk in group:
1581 for chnk in group:
1571 yield chnk
1582 yield chnk
1572 if msng_filenode_set.has_key(fname):
1583 if msng_filenode_set.has_key(fname):
1573 # Don't need this anymore, toss it to free memory.
1584 # Don't need this anymore, toss it to free memory.
1574 del msng_filenode_set[fname]
1585 del msng_filenode_set[fname]
1575 # Signal that no more groups are left.
1586 # Signal that no more groups are left.
1576 yield changegroup.closechunk()
1587 yield changegroup.closechunk()
1577
1588
1578 if msng_cl_lst:
1589 if msng_cl_lst:
1579 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1590 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1580
1591
1581 return util.chunkbuffer(gengroup())
1592 return util.chunkbuffer(gengroup())
1582
1593
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # all changesets descending from basenodes are being sent
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of changelog revision numbers in the outgoing group, used to
        # decide which manifest/file revisions must be sent as well
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(revlog):
            # yield, in revision order, the nodes of this revlog whose
            # linked changeset is part of the outgoing set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # return a callback that records, for each changeset sent,
            # the files it touched (c[3] is the files list of a changelog
            # entry)
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # return a callback mapping a revlog node to the changelog node
            # it is linked to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks first; collecting the changed files as a
            # side effect of the group traversal
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally, one group per changed file, each introduced by a
            # chunk carrying the file name; files with no outgoing
            # revisions are skipped entirely
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups follow
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1648
1659
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            # linkrev callback: cl.count() is the revision number the
            # changeset being added will receive
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node back to its revision number, used as
            # the linkrev for manifest and file revisions
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each file group is introduced by a chunk holding the file
                # name; an empty chunk terminates the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup runs before the transaction is closed so a
            # hook failure (throw=True) still rolls everything back
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one incoming hook invocation per new changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1744
1755
1745
1756
    def stream_in(self, remote):
        """Copy raw repository files streamed from a remote repository.

        Wire format as consumed here: first a status line (any non-zero
        value means the server refused the request), then a line with
        "<total files> <total bytes>", then for each file a header line
        "<name>\\0<size>" followed by exactly <size> bytes of file data.

        Returns len(self.heads()) + 1, mirroring the head-count-based
        return convention of addchangegroup.
        """
        fp = remote.stream_out()
        resp = int(fp.readline())
        if resp != 0:
            raise util.Abort(_('operation forbidden by server'))
        self.ui.status(_('streaming all changes\n'))
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            size = int(size)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.opener(name, 'w')
            # copy exactly `size` bytes in bounded chunks
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        # re-read repository state now that the raw files are in place
        self.reload()
        return len(self.heads()) + 1
1770
1781
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        # NOTE(review): the mutable default heads=[] is safe here only
        # because neither this method nor (presumably) pull mutates it.

        # streaming is only usable for a full clone of a capable server;
        # otherwise fall back to a regular pull
        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
1789
1800
1790 # used to avoid circular references so destructors work
1801 # used to avoid circular references so destructors work
1791 def aftertrans(base):
1802 def aftertrans(base):
1792 p = base
1803 p = base
1793 def a():
1804 def a():
1794 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1805 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1795 util.rename(os.path.join(p, "journal.dirstate"),
1806 util.rename(os.path.join(p, "journal.dirstate"),
1796 os.path.join(p, "undo.dirstate"))
1807 os.path.join(p, "undo.dirstate"))
1797 return a
1808 return a
1798
1809
def instance(ui, path, create):
    """Module-level factory: open (or create) a local repository at path.

    A leading 'file' URL scheme, if present, is stripped before the path
    is handed to localrepository.
    """
    return localrepository(ui, util.drop_scheme('file', path), create)
1801
1812
def islocal(path):
    """A localrepository is, by definition, always local.

    The path argument exists only for interface compatibility with other
    repository backends and is ignored.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now