##// END OF EJS Templates
merge with stable
Matt Mackall -
r13001:cc4e13c9 merge default
parent child Browse files
Show More
@@ -1,1099 +1,1104 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, error, util, subrepo, patch
10 import ancestor, bdiff, error, util, subrepo, patch
11 import os, errno, stat
11 import os, errno, stat
12
12
13 propertycache = util.propertycache
13 propertycache = util.propertycache
14
14
class changectx(object):
    """A changecontext object makes access to data related to a particular
    changeset convenient."""

    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""
        # An empty changeid means the working directory's first parent.
        if changeid == '':
            changeid = '.'
        self._repo = repo
        if isinstance(changeid, (long, int)):
            # numeric revision: resolve rev -> node
            self._rev = changeid
            self._node = self._repo.changelog.node(changeid)
        else:
            # node / tag / symbolic name: resolve name -> node -> rev
            self._node = self._repo.lookup(changeid)
            self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<changectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # no revision yet (e.g. uninitialized subclass): fall back
            # to identity so the object stays hashable
            return id(self)

    def __eq__(self, other):
        try:
            return self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # lazily-read changelog entry tuple for this node
        return self._repo.changelog.read(self.node())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        # cheaper than a full manifest read when only changed files matter
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            # drop the null second parent for non-merge changesets
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    @propertycache
    def substate(self):
        # subrepository state as recorded in this changeset
        return subrepo.state(self, self._repo.ui)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        for f in sorted(self._manifest):
            yield f

    def changeset(self):
        return self._changeset
    def manifest(self):
        return self._manifest
    def manifestnode(self):
        return self._changeset[0]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self._node)
    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return self._changeset[5].get("branch")
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        # no second parent: return the null changeset context
        return changectx(self._repo, -1)

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors(self._rev):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants(self._rev):
            yield changectx(self._repo, d)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising LookupError if absent"""
        if '_manifest' in self.__dict__:
            # full manifest already loaded: use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.LookupError(self._node, path,
                                        _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # the delta is enough when the file changed in this changeset
            if path in self._manifestdelta:
                return self._manifestdelta[path], self._manifestdelta.flags(path)
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.LookupError(self._node, path,
                                    _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # missing files have no flags
            return ''

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2):
        """
        return the ancestor context of self and c2
        """
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # workingctx has no node; use its first parent instead
            n2 = c2._parents[0]._node
        n = self._repo.changelog.ancestor(self._node, n2)
        return changectx(self._repo, n)

    def walk(self, match):
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')
        for fn in self:
            for ffn in fset:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    fset.remove(ffn)
                    break
            if match(fn):
                yield fn
        # anything left in fset was named explicitly but does not exist
        # in this revision; let the matcher's bad() callback decide
        for fn in sorted(fset):
            if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
                yield fn

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None and not isinstance(ctx2, changectx):
            # allow rev numbers / nodes / tags as the other side
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2.node(), self.node(),
                          match=match, opts=diffopts)
class filectx(object):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog:
            self._filelog = filelog

        # only pre-seed the lazily-computed attributes we were handed
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        return changectx(self._repo, self._changeid)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog

    def rev(self):
        # prefer the changeset's revision when a changectx is attached
        # (or can be built cheaply from a known changeid)
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        if '_changeid' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filerev)

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return hex(self.node())
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def data(self):
        return self._filelog.read(self._filenode)
    def path(self):
        return self._path
    def size(self):
        return self._filelog.size(self._filerev)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # equal sizes may still mean different content, so fall through
        # to a real filelog comparison; differing sizes mean "different"
        # unless the working copy (filerev None) has encode filters that
        # could change the size on the fly
        if (fctx._filerev is None and self._repo._encodefilterpats
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # linkrev points elsewhere: only report the copy if neither
        # parent already has this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def parents(self):
        p = self._path
        fl = self._filelog
        pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]

        r = self._filelog.renamed(self._filenode)
        if r:
            # first parent is the copy source; its filelog is unknown here
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

    def annotate(self, follow=False, linenumber=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        def decorate_compat(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def without_linenumber(text, rev):
            return ([(rev, False)] * len(text.splitlines()), text)

        def with_linenumber(text, rev):
            size = len(text.splitlines())
            return ([(rev, i) for i in xrange(1, size + 1)], text)

        decorate = (((linenumber is None) and decorate_compat) or
                    (linenumber and with_linenumber) or
                    without_linenumber)

        def pair(parent, child):
            # lines unchanged between parent and child keep the
            # parent's annotation
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.lrucachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    # follow across the rename boundary
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != nullrev]

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filerev())
        else:
            base = self

        # find all ancestors
        needed = {base: 1}
        visit = [base]
        files = [base._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        for f in files:
            visit.extend(n for n in needed if n._path == f)

        hist = {}
        for f in sorted(visit, key=lambda x: x.rev()):
            curr = decorate(f.data(), f)
            for p in parents(f):
                curr = pair(hist[p], curr)
                # trim the history of unneeded revs
                needed[p] -= 1
                if not needed[p]:
                    del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(True))

    def ancestor(self, fc2, actx=None):
        """
        find the common ancestor file context, if any, of self, and fc2

        If actx is given, it must be the changectx of the common ancestor
        of self's and fc2's respective changesets.
        """

        if actx is None:
            actx = self.changectx().ancestor(fc2.changectx())

        # the trivial case: changesets are unrelated, files must be too
        if not actx:
            return None

        # the easy case: no (relevant) renames
        if fc2.path() == self.path() and self.path() in actx:
            return actx[self.path()]
        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            if c._filerev is None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
        def parents(vertex):
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            re = fl.renamed(n)
            if re:
                # renames add the copy source as an extra parent edge
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None

    def ancestors(self):
        # NOTE(review): set(str(self)) builds a set of the characters of
        # this ctx's name, not {str(self)}; harmless in practice since
        # full "path@node" strings never collide with single characters,
        # but self is never treated as seen — confirm intent upstream.
        seen = set(str(self))
        visit = [self]
        while visit:
            for parent in visit.pop(0).parents():
                s = str(parent)
                if s not in seen:
                    visit.append(parent)
                    seen.add(s)
                    yield parent
560
560
561 class workingctx(changectx):
561 class workingctx(changectx):
562 """A workingctx object makes access to data related to
562 """A workingctx object makes access to data related to
563 the current working directory convenient.
563 the current working directory convenient.
564 date - any valid date string or (unixtime, offset), or None.
564 date - any valid date string or (unixtime, offset), or None.
565 user - username string, or None.
565 user - username string, or None.
566 extra - a dictionary of extra values, or None.
566 extra - a dictionary of extra values, or None.
567 changes - a list of file lists as returned by localrepo.status()
567 changes - a list of file lists as returned by localrepo.status()
568 or None to use the repository status.
568 or None to use the repository status.
569 """
569 """
570 def __init__(self, repo, text="", user=None, date=None, extra=None,
570 def __init__(self, repo, text="", user=None, date=None, extra=None,
571 changes=None):
571 changes=None):
572 self._repo = repo
572 self._repo = repo
573 self._rev = None
573 self._rev = None
574 self._node = None
574 self._node = None
575 self._text = text
575 self._text = text
576 if date:
576 if date:
577 self._date = util.parsedate(date)
577 self._date = util.parsedate(date)
578 if user:
578 if user:
579 self._user = user
579 self._user = user
580 if changes:
580 if changes:
581 self._status = list(changes[:4])
581 self._status = list(changes[:4])
582 self._unknown = changes[4]
582 self._unknown = changes[4]
583 self._ignored = changes[5]
583 self._ignored = changes[5]
584 self._clean = changes[6]
584 self._clean = changes[6]
585 else:
585 else:
586 self._unknown = None
586 self._unknown = None
587 self._ignored = None
587 self._ignored = None
588 self._clean = None
588 self._clean = None
589
589
590 self._extra = {}
590 self._extra = {}
591 if extra:
591 if extra:
592 self._extra = extra.copy()
592 self._extra = extra.copy()
593 if 'branch' not in self._extra:
593 if 'branch' not in self._extra:
594 branch = self._repo.dirstate.branch()
594 branch = self._repo.dirstate.branch()
595 try:
595 try:
596 branch = branch.decode('UTF-8').encode('UTF-8')
596 branch = branch.decode('UTF-8').encode('UTF-8')
597 except UnicodeDecodeError:
597 except UnicodeDecodeError:
598 raise util.Abort(_('branch name not in UTF-8!'))
598 raise util.Abort(_('branch name not in UTF-8!'))
599 self._extra['branch'] = branch
599 self._extra['branch'] = branch
600 if self._extra['branch'] == '':
600 if self._extra['branch'] == '':
601 self._extra['branch'] = 'default'
601 self._extra['branch'] = 'default'
602
602
603 def __str__(self):
603 def __str__(self):
604 return str(self._parents[0]) + "+"
604 return str(self._parents[0]) + "+"
605
605
606 def __repr__(self):
606 def __repr__(self):
607 return "<workingctx %s>" % str(self)
607 return "<workingctx %s>" % str(self)
608
608
609 def __nonzero__(self):
609 def __nonzero__(self):
610 return True
610 return True
611
611
612 def __contains__(self, key):
612 def __contains__(self, key):
613 return self._repo.dirstate[key] not in "?r"
613 return self._repo.dirstate[key] not in "?r"
614
614
615 @propertycache
615 @propertycache
616 def _manifest(self):
616 def _manifest(self):
617 """generate a manifest corresponding to the working directory"""
617 """generate a manifest corresponding to the working directory"""
618
618
619 if self._unknown is None:
619 if self._unknown is None:
620 self.status(unknown=True)
620 self.status(unknown=True)
621
621
622 man = self._parents[0].manifest().copy()
622 man = self._parents[0].manifest().copy()
623 copied = self._repo.dirstate.copies()
623 copied = self._repo.dirstate.copies()
624 if len(self._parents) > 1:
624 if len(self._parents) > 1:
625 man2 = self.p2().manifest()
625 man2 = self.p2().manifest()
626 def getman(f):
626 def getman(f):
627 if f in man:
627 if f in man:
628 return man
628 return man
629 return man2
629 return man2
630 else:
630 else:
631 getman = lambda f: man
631 getman = lambda f: man
632 def cf(f):
632 def cf(f):
633 f = copied.get(f, f)
633 f = copied.get(f, f)
634 return getman(f).flags(f)
634 return getman(f).flags(f)
635 ff = self._repo.dirstate.flagfunc(cf)
635 ff = self._repo.dirstate.flagfunc(cf)
636 modified, added, removed, deleted = self._status
636 modified, added, removed, deleted = self._status
637 unknown = self._unknown
637 unknown = self._unknown
638 for i, l in (("a", added), ("m", modified), ("u", unknown)):
638 for i, l in (("a", added), ("m", modified), ("u", unknown)):
639 for f in l:
639 for f in l:
640 orig = copied.get(f, f)
640 orig = copied.get(f, f)
641 man[f] = getman(orig).get(orig, nullid) + i
641 man[f] = getman(orig).get(orig, nullid) + i
642 try:
642 try:
643 man.set(f, ff(f))
643 man.set(f, ff(f))
644 except OSError:
644 except OSError:
645 pass
645 pass
646
646
647 for f in deleted + removed:
647 for f in deleted + removed:
648 if f in man:
648 if f in man:
649 del man[f]
649 del man[f]
650
650
651 return man
651 return man
652
652
653 @propertycache
653 @propertycache
654 def _status(self):
654 def _status(self):
655 return self._repo.status()[:4]
655 return self._repo.status()[:4]
656
656
657 @propertycache
657 @propertycache
658 def _user(self):
658 def _user(self):
659 return self._repo.ui.username()
659 return self._repo.ui.username()
660
660
661 @propertycache
661 @propertycache
662 def _date(self):
662 def _date(self):
663 return util.makedate()
663 return util.makedate()
664
664
665 @propertycache
665 @propertycache
666 def _parents(self):
666 def _parents(self):
667 p = self._repo.dirstate.parents()
667 p = self._repo.dirstate.parents()
668 if p[1] == nullid:
668 if p[1] == nullid:
669 p = p[:-1]
669 p = p[:-1]
670 self._parents = [changectx(self._repo, x) for x in p]
670 self._parents = [changectx(self._repo, x) for x in p]
671 return self._parents
671 return self._parents
672
672
673 def status(self, ignored=False, clean=False, unknown=False):
673 def status(self, ignored=False, clean=False, unknown=False):
674 """Explicit status query
674 """Explicit status query
675 Unless this method is used to query the working copy status, the
675 Unless this method is used to query the working copy status, the
676 _status property will implicitly read the status using its default
676 _status property will implicitly read the status using its default
677 arguments."""
677 arguments."""
678 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
678 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
679 self._unknown = self._ignored = self._clean = None
679 self._unknown = self._ignored = self._clean = None
680 if unknown:
680 if unknown:
681 self._unknown = stat[4]
681 self._unknown = stat[4]
682 if ignored:
682 if ignored:
683 self._ignored = stat[5]
683 self._ignored = stat[5]
684 if clean:
684 if clean:
685 self._clean = stat[6]
685 self._clean = stat[6]
686 self._status = stat[:4]
686 self._status = stat[:4]
687 return stat
687 return stat
688
688
689 def manifest(self):
689 def manifest(self):
690 return self._manifest
690 return self._manifest
691 def user(self):
691 def user(self):
692 return self._user or self._repo.ui.username()
692 return self._user or self._repo.ui.username()
693 def date(self):
693 def date(self):
694 return self._date
694 return self._date
695 def description(self):
695 def description(self):
696 return self._text
696 return self._text
697 def files(self):
697 def files(self):
698 return sorted(self._status[0] + self._status[1] + self._status[2])
698 return sorted(self._status[0] + self._status[1] + self._status[2])
699
699
700 def modified(self):
700 def modified(self):
701 return self._status[0]
701 return self._status[0]
702 def added(self):
702 def added(self):
703 return self._status[1]
703 return self._status[1]
704 def removed(self):
704 def removed(self):
705 return self._status[2]
705 return self._status[2]
706 def deleted(self):
706 def deleted(self):
707 return self._status[3]
707 return self._status[3]
708 def unknown(self):
708 def unknown(self):
709 assert self._unknown is not None # must call status first
709 assert self._unknown is not None # must call status first
710 return self._unknown
710 return self._unknown
711 def ignored(self):
711 def ignored(self):
712 assert self._ignored is not None # must call status first
712 assert self._ignored is not None # must call status first
713 return self._ignored
713 return self._ignored
714 def clean(self):
714 def clean(self):
715 assert self._clean is not None # must call status first
715 assert self._clean is not None # must call status first
716 return self._clean
716 return self._clean
717 def branch(self):
717 def branch(self):
718 return self._extra['branch']
718 return self._extra['branch']
719 def extra(self):
719 def extra(self):
720 return self._extra
720 return self._extra
721
721
722 def tags(self):
722 def tags(self):
723 t = []
723 t = []
724 [t.extend(p.tags()) for p in self.parents()]
724 [t.extend(p.tags()) for p in self.parents()]
725 return t
725 return t
726
726
727 def children(self):
727 def children(self):
728 return []
728 return []
729
729
730 def flags(self, path):
730 def flags(self, path):
731 if '_manifest' in self.__dict__:
731 if '_manifest' in self.__dict__:
732 try:
732 try:
733 return self._manifest.flags(path)
733 return self._manifest.flags(path)
734 except KeyError:
734 except KeyError:
735 return ''
735 return ''
736
736
737 orig = self._repo.dirstate.copies().get(path, path)
737 orig = self._repo.dirstate.copies().get(path, path)
738
738
739 def findflag(ctx):
739 def findflag(ctx):
740 mnode = ctx.changeset()[0]
740 mnode = ctx.changeset()[0]
741 node, flag = self._repo.manifest.find(mnode, orig)
741 node, flag = self._repo.manifest.find(mnode, orig)
742 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
742 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
743 try:
743 try:
744 return ff(path)
744 return ff(path)
745 except OSError:
745 except OSError:
746 pass
746 pass
747
747
748 flag = findflag(self._parents[0])
748 flag = findflag(self._parents[0])
749 if flag is None and len(self.parents()) > 1:
749 if flag is None and len(self.parents()) > 1:
750 flag = findflag(self._parents[1])
750 flag = findflag(self._parents[1])
751 if flag is None or self._repo.dirstate[path] == 'r':
751 if flag is None or self._repo.dirstate[path] == 'r':
752 return ''
752 return ''
753 return flag
753 return flag
754
754
755 def filectx(self, path, filelog=None):
755 def filectx(self, path, filelog=None):
756 """get a file context from the working directory"""
756 """get a file context from the working directory"""
757 return workingfilectx(self._repo, path, workingctx=self,
757 return workingfilectx(self._repo, path, workingctx=self,
758 filelog=filelog)
758 filelog=filelog)
759
759
760 def ancestor(self, c2):
760 def ancestor(self, c2):
761 """return the ancestor context of self and c2"""
761 """return the ancestor context of self and c2"""
762 return self._parents[0].ancestor(c2) # punt on two parents for now
762 return self._parents[0].ancestor(c2) # punt on two parents for now
763
763
764 def walk(self, match):
764 def walk(self, match):
765 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
765 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
766 True, False))
766 True, False))
767
767
768 def dirty(self, missing=False):
768 def dirty(self, missing=False):
769 "check whether a working directory is modified"
769 "check whether a working directory is modified"
770 # check subrepos first
770 # check subrepos first
771 for s in self.substate:
771 for s in self.substate:
772 if self.sub(s).dirty():
772 if self.sub(s).dirty():
773 return True
773 return True
774 # check current working dir
774 # check current working dir
775 return (self.p2() or self.branch() != self.p1().branch() or
775 return (self.p2() or self.branch() != self.p1().branch() or
776 self.modified() or self.added() or self.removed() or
776 self.modified() or self.added() or self.removed() or
777 (missing and self.deleted()))
777 (missing and self.deleted()))
778
778
779 def add(self, list, prefix=""):
779 def add(self, list, prefix=""):
780 join = lambda f: os.path.join(prefix, f)
780 join = lambda f: os.path.join(prefix, f)
781 wlock = self._repo.wlock()
781 wlock = self._repo.wlock()
782 ui, ds = self._repo.ui, self._repo.dirstate
782 ui, ds = self._repo.ui, self._repo.dirstate
783 try:
783 try:
784 rejected = []
784 rejected = []
785 for f in list:
785 for f in list:
786 p = self._repo.wjoin(f)
786 p = self._repo.wjoin(f)
787 try:
787 try:
788 st = os.lstat(p)
788 st = os.lstat(p)
789 except:
789 except:
790 ui.warn(_("%s does not exist!\n") % join(f))
790 ui.warn(_("%s does not exist!\n") % join(f))
791 rejected.append(f)
791 rejected.append(f)
792 continue
792 continue
793 if st.st_size > 10000000:
793 if st.st_size > 10000000:
794 ui.warn(_("%s: up to %d MB of RAM may be required "
794 ui.warn(_("%s: up to %d MB of RAM may be required "
795 "to manage this file\n"
795 "to manage this file\n"
796 "(use 'hg revert %s' to cancel the "
796 "(use 'hg revert %s' to cancel the "
797 "pending addition)\n")
797 "pending addition)\n")
798 % (f, 3 * st.st_size // 1000000, join(f)))
798 % (f, 3 * st.st_size // 1000000, join(f)))
799 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
799 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
800 ui.warn(_("%s not added: only files and symlinks "
800 ui.warn(_("%s not added: only files and symlinks "
801 "supported currently\n") % join(f))
801 "supported currently\n") % join(f))
802 rejected.append(p)
802 rejected.append(p)
803 elif ds[f] in 'amn':
803 elif ds[f] in 'amn':
804 ui.warn(_("%s already tracked!\n") % join(f))
804 ui.warn(_("%s already tracked!\n") % join(f))
805 elif ds[f] == 'r':
805 elif ds[f] == 'r':
806 ds.normallookup(f)
806 ds.normallookup(f)
807 else:
807 else:
808 ds.add(f)
808 ds.add(f)
809 return rejected
809 return rejected
810 finally:
810 finally:
811 wlock.release()
811 wlock.release()
812
812
813 def forget(self, list):
813 def forget(self, list):
814 wlock = self._repo.wlock()
814 wlock = self._repo.wlock()
815 try:
815 try:
816 for f in list:
816 for f in list:
817 if self._repo.dirstate[f] != 'a':
817 if self._repo.dirstate[f] != 'a':
818 self._repo.ui.warn(_("%s not added!\n") % f)
818 self._repo.ui.warn(_("%s not added!\n") % f)
819 else:
819 else:
820 self._repo.dirstate.forget(f)
820 self._repo.dirstate.forget(f)
821 finally:
821 finally:
822 wlock.release()
822 wlock.release()
823
823
824 def ancestors(self):
825 for a in self._repo.changelog.ancestors(
826 *[p.rev() for p in self._parents]):
827 yield changectx(self._repo, a)
828
824 def remove(self, list, unlink=False):
829 def remove(self, list, unlink=False):
825 if unlink:
830 if unlink:
826 for f in list:
831 for f in list:
827 try:
832 try:
828 util.unlink(self._repo.wjoin(f))
833 util.unlink(self._repo.wjoin(f))
829 except OSError, inst:
834 except OSError, inst:
830 if inst.errno != errno.ENOENT:
835 if inst.errno != errno.ENOENT:
831 raise
836 raise
832 wlock = self._repo.wlock()
837 wlock = self._repo.wlock()
833 try:
838 try:
834 for f in list:
839 for f in list:
835 if unlink and os.path.lexists(self._repo.wjoin(f)):
840 if unlink and os.path.lexists(self._repo.wjoin(f)):
836 self._repo.ui.warn(_("%s still exists!\n") % f)
841 self._repo.ui.warn(_("%s still exists!\n") % f)
837 elif self._repo.dirstate[f] == 'a':
842 elif self._repo.dirstate[f] == 'a':
838 self._repo.dirstate.forget(f)
843 self._repo.dirstate.forget(f)
839 elif f not in self._repo.dirstate:
844 elif f not in self._repo.dirstate:
840 self._repo.ui.warn(_("%s not tracked!\n") % f)
845 self._repo.ui.warn(_("%s not tracked!\n") % f)
841 else:
846 else:
842 self._repo.dirstate.remove(f)
847 self._repo.dirstate.remove(f)
843 finally:
848 finally:
844 wlock.release()
849 wlock.release()
845
850
846 def undelete(self, list):
851 def undelete(self, list):
847 pctxs = self.parents()
852 pctxs = self.parents()
848 wlock = self._repo.wlock()
853 wlock = self._repo.wlock()
849 try:
854 try:
850 for f in list:
855 for f in list:
851 if self._repo.dirstate[f] != 'r':
856 if self._repo.dirstate[f] != 'r':
852 self._repo.ui.warn(_("%s not removed!\n") % f)
857 self._repo.ui.warn(_("%s not removed!\n") % f)
853 else:
858 else:
854 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
859 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
855 t = fctx.data()
860 t = fctx.data()
856 self._repo.wwrite(f, t, fctx.flags())
861 self._repo.wwrite(f, t, fctx.flags())
857 self._repo.dirstate.normal(f)
862 self._repo.dirstate.normal(f)
858 finally:
863 finally:
859 wlock.release()
864 wlock.release()
860
865
861 def copy(self, source, dest):
866 def copy(self, source, dest):
862 p = self._repo.wjoin(dest)
867 p = self._repo.wjoin(dest)
863 if not os.path.lexists(p):
868 if not os.path.lexists(p):
864 self._repo.ui.warn(_("%s does not exist!\n") % dest)
869 self._repo.ui.warn(_("%s does not exist!\n") % dest)
865 elif not (os.path.isfile(p) or os.path.islink(p)):
870 elif not (os.path.isfile(p) or os.path.islink(p)):
866 self._repo.ui.warn(_("copy failed: %s is not a file or a "
871 self._repo.ui.warn(_("copy failed: %s is not a file or a "
867 "symbolic link\n") % dest)
872 "symbolic link\n") % dest)
868 else:
873 else:
869 wlock = self._repo.wlock()
874 wlock = self._repo.wlock()
870 try:
875 try:
871 if self._repo.dirstate[dest] in '?r':
876 if self._repo.dirstate[dest] in '?r':
872 self._repo.dirstate.add(dest)
877 self._repo.dirstate.add(dest)
873 self._repo.dirstate.copy(source, dest)
878 self._repo.dirstate.copy(source, dest)
874 finally:
879 finally:
875 wlock.release()
880 wlock.release()
876
881
877 class workingfilectx(filectx):
882 class workingfilectx(filectx):
878 """A workingfilectx object makes access to data related to a particular
883 """A workingfilectx object makes access to data related to a particular
879 file in the working directory convenient."""
884 file in the working directory convenient."""
880 def __init__(self, repo, path, filelog=None, workingctx=None):
885 def __init__(self, repo, path, filelog=None, workingctx=None):
881 """changeid can be a changeset revision, node, or tag.
886 """changeid can be a changeset revision, node, or tag.
882 fileid can be a file revision or node."""
887 fileid can be a file revision or node."""
883 self._repo = repo
888 self._repo = repo
884 self._path = path
889 self._path = path
885 self._changeid = None
890 self._changeid = None
886 self._filerev = self._filenode = None
891 self._filerev = self._filenode = None
887
892
888 if filelog:
893 if filelog:
889 self._filelog = filelog
894 self._filelog = filelog
890 if workingctx:
895 if workingctx:
891 self._changectx = workingctx
896 self._changectx = workingctx
892
897
893 @propertycache
898 @propertycache
894 def _changectx(self):
899 def _changectx(self):
895 return workingctx(self._repo)
900 return workingctx(self._repo)
896
901
897 def __nonzero__(self):
902 def __nonzero__(self):
898 return True
903 return True
899
904
900 def __str__(self):
905 def __str__(self):
901 return "%s@%s" % (self.path(), self._changectx)
906 return "%s@%s" % (self.path(), self._changectx)
902
907
903 def __repr__(self):
908 def __repr__(self):
904 return "<workingfilectx %s>" % str(self)
909 return "<workingfilectx %s>" % str(self)
905
910
906 def data(self):
911 def data(self):
907 return self._repo.wread(self._path)
912 return self._repo.wread(self._path)
908 def renamed(self):
913 def renamed(self):
909 rp = self._repo.dirstate.copied(self._path)
914 rp = self._repo.dirstate.copied(self._path)
910 if not rp:
915 if not rp:
911 return None
916 return None
912 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
917 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
913
918
914 def parents(self):
919 def parents(self):
915 '''return parent filectxs, following copies if necessary'''
920 '''return parent filectxs, following copies if necessary'''
916 def filenode(ctx, path):
921 def filenode(ctx, path):
917 return ctx._manifest.get(path, nullid)
922 return ctx._manifest.get(path, nullid)
918
923
919 path = self._path
924 path = self._path
920 fl = self._filelog
925 fl = self._filelog
921 pcl = self._changectx._parents
926 pcl = self._changectx._parents
922 renamed = self.renamed()
927 renamed = self.renamed()
923
928
924 if renamed:
929 if renamed:
925 pl = [renamed + (None,)]
930 pl = [renamed + (None,)]
926 else:
931 else:
927 pl = [(path, filenode(pcl[0], path), fl)]
932 pl = [(path, filenode(pcl[0], path), fl)]
928
933
929 for pc in pcl[1:]:
934 for pc in pcl[1:]:
930 pl.append((path, filenode(pc, path), fl))
935 pl.append((path, filenode(pc, path), fl))
931
936
932 return [filectx(self._repo, p, fileid=n, filelog=l)
937 return [filectx(self._repo, p, fileid=n, filelog=l)
933 for p, n, l in pl if n != nullid]
938 for p, n, l in pl if n != nullid]
934
939
935 def children(self):
940 def children(self):
936 return []
941 return []
937
942
938 def size(self):
943 def size(self):
939 return os.lstat(self._repo.wjoin(self._path)).st_size
944 return os.lstat(self._repo.wjoin(self._path)).st_size
940 def date(self):
945 def date(self):
941 t, tz = self._changectx.date()
946 t, tz = self._changectx.date()
942 try:
947 try:
943 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
948 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
944 except OSError, err:
949 except OSError, err:
945 if err.errno != errno.ENOENT:
950 if err.errno != errno.ENOENT:
946 raise
951 raise
947 return (t, tz)
952 return (t, tz)
948
953
949 def cmp(self, fctx):
954 def cmp(self, fctx):
950 """compare with other file context
955 """compare with other file context
951
956
952 returns True if different than fctx.
957 returns True if different than fctx.
953 """
958 """
954 # fctx should be a filectx (not a wfctx)
959 # fctx should be a filectx (not a wfctx)
955 # invert comparison to reuse the same code path
960 # invert comparison to reuse the same code path
956 return fctx.cmp(self)
961 return fctx.cmp(self)
957
962
958 class memctx(object):
963 class memctx(object):
959 """Use memctx to perform in-memory commits via localrepo.commitctx().
964 """Use memctx to perform in-memory commits via localrepo.commitctx().
960
965
961 Revision information is supplied at initialization time while
966 Revision information is supplied at initialization time while
962 related files data and is made available through a callback
967 related files data and is made available through a callback
963 mechanism. 'repo' is the current localrepo, 'parents' is a
968 mechanism. 'repo' is the current localrepo, 'parents' is a
964 sequence of two parent revisions identifiers (pass None for every
969 sequence of two parent revisions identifiers (pass None for every
965 missing parent), 'text' is the commit message and 'files' lists
970 missing parent), 'text' is the commit message and 'files' lists
966 names of files touched by the revision (normalized and relative to
971 names of files touched by the revision (normalized and relative to
967 repository root).
972 repository root).
968
973
969 filectxfn(repo, memctx, path) is a callable receiving the
974 filectxfn(repo, memctx, path) is a callable receiving the
970 repository, the current memctx object and the normalized path of
975 repository, the current memctx object and the normalized path of
971 requested file, relative to repository root. It is fired by the
976 requested file, relative to repository root. It is fired by the
972 commit function for every file in 'files', but calls order is
977 commit function for every file in 'files', but calls order is
973 undefined. If the file is available in the revision being
978 undefined. If the file is available in the revision being
974 committed (updated or added), filectxfn returns a memfilectx
979 committed (updated or added), filectxfn returns a memfilectx
975 object. If the file was removed, filectxfn raises an
980 object. If the file was removed, filectxfn raises an
976 IOError. Moved files are represented by marking the source file
981 IOError. Moved files are represented by marking the source file
977 removed and the new file added with copy information (see
982 removed and the new file added with copy information (see
978 memfilectx).
983 memfilectx).
979
984
980 user receives the committer name and defaults to current
985 user receives the committer name and defaults to current
981 repository username, date is the commit date in any format
986 repository username, date is the commit date in any format
982 supported by util.parsedate() and defaults to current date, extra
987 supported by util.parsedate() and defaults to current date, extra
983 is a dictionary of metadata or is left empty.
988 is a dictionary of metadata or is left empty.
984 """
989 """
985 def __init__(self, repo, parents, text, files, filectxfn, user=None,
990 def __init__(self, repo, parents, text, files, filectxfn, user=None,
986 date=None, extra=None):
991 date=None, extra=None):
987 self._repo = repo
992 self._repo = repo
988 self._rev = None
993 self._rev = None
989 self._node = None
994 self._node = None
990 self._text = text
995 self._text = text
991 self._date = date and util.parsedate(date) or util.makedate()
996 self._date = date and util.parsedate(date) or util.makedate()
992 self._user = user
997 self._user = user
993 parents = [(p or nullid) for p in parents]
998 parents = [(p or nullid) for p in parents]
994 p1, p2 = parents
999 p1, p2 = parents
995 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1000 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
996 files = sorted(set(files))
1001 files = sorted(set(files))
997 self._status = [files, [], [], [], []]
1002 self._status = [files, [], [], [], []]
998 self._filectxfn = filectxfn
1003 self._filectxfn = filectxfn
999
1004
1000 self._extra = extra and extra.copy() or {}
1005 self._extra = extra and extra.copy() or {}
1001 if 'branch' not in self._extra:
1006 if 'branch' not in self._extra:
1002 self._extra['branch'] = 'default'
1007 self._extra['branch'] = 'default'
1003 elif self._extra.get('branch') == '':
1008 elif self._extra.get('branch') == '':
1004 self._extra['branch'] = 'default'
1009 self._extra['branch'] = 'default'
1005
1010
1006 def __str__(self):
1011 def __str__(self):
1007 return str(self._parents[0]) + "+"
1012 return str(self._parents[0]) + "+"
1008
1013
1009 def __int__(self):
1014 def __int__(self):
1010 return self._rev
1015 return self._rev
1011
1016
1012 def __nonzero__(self):
1017 def __nonzero__(self):
1013 return True
1018 return True
1014
1019
1015 def __getitem__(self, key):
1020 def __getitem__(self, key):
1016 return self.filectx(key)
1021 return self.filectx(key)
1017
1022
1018 def p1(self):
1023 def p1(self):
1019 return self._parents[0]
1024 return self._parents[0]
1020 def p2(self):
1025 def p2(self):
1021 return self._parents[1]
1026 return self._parents[1]
1022
1027
1023 def user(self):
1028 def user(self):
1024 return self._user or self._repo.ui.username()
1029 return self._user or self._repo.ui.username()
1025 def date(self):
1030 def date(self):
1026 return self._date
1031 return self._date
1027 def description(self):
1032 def description(self):
1028 return self._text
1033 return self._text
1029 def files(self):
1034 def files(self):
1030 return self.modified()
1035 return self.modified()
1031 def modified(self):
1036 def modified(self):
1032 return self._status[0]
1037 return self._status[0]
1033 def added(self):
1038 def added(self):
1034 return self._status[1]
1039 return self._status[1]
1035 def removed(self):
1040 def removed(self):
1036 return self._status[2]
1041 return self._status[2]
1037 def deleted(self):
1042 def deleted(self):
1038 return self._status[3]
1043 return self._status[3]
1039 def unknown(self):
1044 def unknown(self):
1040 return self._status[4]
1045 return self._status[4]
1041 def ignored(self):
1046 def ignored(self):
1042 return self._status[5]
1047 return self._status[5]
1043 def clean(self):
1048 def clean(self):
1044 return self._status[6]
1049 return self._status[6]
1045 def branch(self):
1050 def branch(self):
1046 return self._extra['branch']
1051 return self._extra['branch']
1047 def extra(self):
1052 def extra(self):
1048 return self._extra
1053 return self._extra
1049 def flags(self, f):
1054 def flags(self, f):
1050 return self[f].flags()
1055 return self[f].flags()
1051
1056
1052 def parents(self):
1057 def parents(self):
1053 """return contexts for each parent changeset"""
1058 """return contexts for each parent changeset"""
1054 return self._parents
1059 return self._parents
1055
1060
1056 def filectx(self, path, filelog=None):
1061 def filectx(self, path, filelog=None):
1057 """get a file context from the working directory"""
1062 """get a file context from the working directory"""
1058 return self._filectxfn(self._repo, self, path)
1063 return self._filectxfn(self._repo, self, path)
1059
1064
1060 def commit(self):
1065 def commit(self):
1061 """commit context to the repo"""
1066 """commit context to the repo"""
1062 return self._repo.commitctx(self)
1067 return self._repo.commitctx(self)
1063
1068
1064 class memfilectx(object):
1069 class memfilectx(object):
1065 """memfilectx represents an in-memory file to commit.
1070 """memfilectx represents an in-memory file to commit.
1066
1071
1067 See memctx for more details.
1072 See memctx for more details.
1068 """
1073 """
1069 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1074 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1070 """
1075 """
1071 path is the normalized file path relative to repository root.
1076 path is the normalized file path relative to repository root.
1072 data is the file content as a string.
1077 data is the file content as a string.
1073 islink is True if the file is a symbolic link.
1078 islink is True if the file is a symbolic link.
1074 isexec is True if the file is executable.
1079 isexec is True if the file is executable.
1075 copied is the source file path if current file was copied in the
1080 copied is the source file path if current file was copied in the
1076 revision being committed, or None."""
1081 revision being committed, or None."""
1077 self._path = path
1082 self._path = path
1078 self._data = data
1083 self._data = data
1079 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1084 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1080 self._copied = None
1085 self._copied = None
1081 if copied:
1086 if copied:
1082 self._copied = (copied, nullid)
1087 self._copied = (copied, nullid)
1083
1088
1084 def __nonzero__(self):
1089 def __nonzero__(self):
1085 return True
1090 return True
1086 def __str__(self):
1091 def __str__(self):
1087 return "%s@%s" % (self.path(), self._changectx)
1092 return "%s@%s" % (self.path(), self._changectx)
1088 def path(self):
1093 def path(self):
1089 return self._path
1094 return self._path
1090 def data(self):
1095 def data(self):
1091 return self._data
1096 return self._data
1092 def flags(self):
1097 def flags(self):
1093 return self._flags
1098 return self._flags
1094 def isexec(self):
1099 def isexec(self):
1095 return 'x' in self._flags
1100 return 'x' in self._flags
1096 def islink(self):
1101 def islink(self):
1097 return 'l' in self._flags
1102 return 'l' in self._flags
1098 def renamed(self):
1103 def renamed(self):
1099 return self._copied
1104 return self._copied
@@ -1,1899 +1,1904 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
def __init__(self, baseui, path=None, create=0):
    """Open (or, if create is true, initialize) the repository at path.

    baseui is the caller's ui object; a private copy is made so that
    per-repository hgrc settings do not leak back to the caller.
    Raises error.RepoError if the repository is missing (create=0),
    already exists (create=1), or declares an unsupported requirement.
    """
    repo.repository.__init__(self)
    self.root = os.path.realpath(util.expandpath(path))
    self.path = os.path.join(self.root, ".hg")
    self.origroot = path
    # auditor rejects paths escaping the repo or entering nested repos
    self.auditor = util.path_auditor(self.root, self._checknested)
    self.opener = util.opener(self.path)    # opens files under .hg/
    self.wopener = util.opener(self.root)   # opens working-copy files
    self.baseui = baseui
    self.ui = baseui.copy()

    try:
        # layer the repository's own hgrc on top of the base config,
        # then load any extensions it enables
        self.ui.readconfig(self.join("hgrc"), self.root)
        extensions.loadall(self.ui)
    except IOError:
        # no .hg/hgrc is perfectly fine
        pass

    if not os.path.isdir(self.path):
        if create:
            if not os.path.exists(path):
                util.makedirs(path)
            os.mkdir(self.path)
            # build the requirements list from format.* config; note the
            # deliberate nesting: fncache implies store, dotencode
            # implies fncache
            requirements = ["revlogv1"]
            if self.ui.configbool('format', 'usestore', True):
                os.mkdir(os.path.join(self.path, "store"))
                requirements.append("store")
                if self.ui.configbool('format', 'usefncache', True):
                    requirements.append("fncache")
                    if self.ui.configbool('format', 'dotencode', True):
                        requirements.append('dotencode')
            # create an invalid changelog
            self.opener("00changelog.i", "a").write(
                '\0\0\0\2' # represents revlogv2
                ' dummy changelog to prevent using the old repo layout'
            )
            if self.ui.configbool('format', 'parentdelta', False):
                requirements.append("parentdelta")
        else:
            raise error.RepoError(_("repository %s not found") % path)
    elif create:
        raise error.RepoError(_("repository %s already exists") % path)
    else:
        # find requirements
        requirements = set()
        try:
            requirements = set(self.opener("requires").read().splitlines())
        except IOError, inst:
            # a missing requires file means an old-style repo; any other
            # I/O error is real
            if inst.errno != errno.ENOENT:
                raise
        for r in requirements - self.supported:
            raise error.RepoError(_("requirement '%s' not supported") % r)

    # honor .hg/sharedpath: the store may live in another repository
    self.sharedpath = self.path
    try:
        s = os.path.realpath(self.opener("sharedpath").read())
        if not os.path.exists(s):
            raise error.RepoError(
                _('.hg/sharedpath points to nonexistent directory %s') % s)
        self.sharedpath = s
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise

    self.store = store.store(requirements, self.sharedpath, util.opener)
    self.spath = self.store.path
    self.sopener = self.store.opener
    self.sjoin = self.store.join
    self.opener.createmode = self.store.createmode
    self._applyrequirements(requirements)
    if create:
        self._writerequirements()

    # These two define the set of tags for this repository. _tags
    # maps tag name to node; _tagtypes maps tag name to 'global' or
    # 'local'. (Global tags are defined by .hgtags across all
    # heads, and local tags are defined in .hg/localtags.) They
    # constitute the in-memory cache of tags.
    self._tags = None
    self._tagtypes = None

    self._branchcache = None # in UTF-8
    self._branchcachetip = None
    self.nodetagscache = None
    self.filterpats = {}
    self._datafilters = {}
    # weakrefs to the active transaction/lock/wlock, if any
    self._transref = self._lockref = self._wlockref = None
114
114
def _applyrequirements(self, requirements):
    """Record the requirement set and translate it into store-opener
    options that the revlog layer consults (currently only
    'parentdelta')."""
    self.requirements = requirements
    self.sopener.options = {}
    if 'parentdelta' in requirements:
        self.sopener.options['parentdelta'] = 1
120
120
def _writerequirements(self):
    """Persist self.requirements to .hg/requires, one entry per line."""
    reqfile = self.opener("requires", "w")
    for requirement in self.requirements:
        reqfile.write("%s\n" % requirement)
    reqfile.close()
126
126
def _checknested(self, path):
    """Determine if path is a legal nested repository.

    path is an absolute filesystem path; returns True only when it
    falls inside a subrepository recorded in the working directory's
    .hgsub state (recursing into nested subrepos as needed).
    """
    if not path.startswith(self.root):
        return False
    # path relative to the repository root
    subpath = path[len(self.root) + 1:]

    # XXX: Checking against the current working copy is wrong in
    # the sense that it can reject things like
    #
    #   $ hg cat -r 10 sub/x.txt
    #
    # if sub/ is no longer a subrepository in the working copy
    # parent revision.
    #
    # However, it can of course also allow things that would have
    # been rejected before, such as the above cat command if sub/
    # is a subrepository now, but was a normal directory before.
    # The old path auditor would have rejected by mistake since it
    # panics when it sees sub/.hg/.
    #
    # All in all, checking against the working copy seems sensible
    # since we want to prevent access to nested repositories on
    # the filesystem *now*.
    ctx = self[None]
    parts = util.splitpath(subpath)
    # walk prefixes of the path from longest to shortest, looking for
    # one that names a subrepository
    while parts:
        prefix = os.sep.join(parts)
        if prefix in ctx.substate:
            if prefix == subpath:
                # path is exactly a subrepo root: legal
                return True
            else:
                # path is inside a subrepo: delegate the check to it
                sub = ctx.sub(prefix)
                return sub.checknested(subpath[len(prefix) + 1:])
        else:
            parts.pop()
    return False
163
163
164
164
@propertycache
def changelog(self):
    """Lazily open the changelog, honoring pending (uncommitted
    transaction) data when running inside a pretxn hook."""
    c = changelog.changelog(self.sopener)
    if 'HG_PENDING' in os.environ:
        # HG_PENDING names the repo whose transaction is open; only
        # read pending data if it is this repository
        p = os.environ['HG_PENDING']
        if p.startswith(self.root):
            c.readpending('00changelog.i.a')
    # revlogs created later default to the changelog's format version
    self.sopener.options['defversion'] = c.version
    return c

@propertycache
def manifest(self):
    """Lazily open the manifest log."""
    return manifest.manifest(self.sopener)

@propertycache
def dirstate(self):
    """Lazily open the working-directory dirstate."""
    return dirstate.dirstate(self.opener, self.ui, self.root)
182
182
def __getitem__(self, changeid):
    # repo[None] is the working directory; anything else resolves to
    # a changectx (rev number, node, tag, ...)
    if changeid is None:
        return context.workingctx(self)
    return context.changectx(self, changeid)

def __contains__(self, changeid):
    # True if changeid resolves to a known changeset
    try:
        return bool(self.lookup(changeid))
    except error.RepoLookupError:
        return False

def __nonzero__(self):
    # a repository object is always truthy, even when empty
    return True

def __len__(self):
    # number of revisions in the changelog
    return len(self.changelog)

def __iter__(self):
    # iterate over revision numbers, oldest first
    for i in xrange(len(self)):
        yield i
203
203
def url(self):
    """Return this repository's URL (always a local 'file:' URL)."""
    return 'file:%s' % self.root

def hook(self, name, throw=False, **args):
    """Run the named hook; if throw is true, abort on hook failure."""
    return hook.hook(self.ui, self, name, throw, **args)
209
209
# characters that may never appear in a tag name
tag_disallowed = ':\r\n'

def _tag(self, names, node, message, local, user, date, extra={}):
    """Low-level tagging: write names->node entries to .hg/localtags
    (local=True) or .hgtags plus a commit (local=False).

    Runs the 'pretag' hook (which may veto) before writing and the
    'tag' hook after. Returns the commit node for global tags, or
    None for local tags.
    """
    if isinstance(names, str):
        allchars = names
        names = (names,)
    else:
        allchars = ''.join(names)
    for c in self.tag_disallowed:
        if c in allchars:
            raise util.Abort(_('%r cannot be used in a tag name') % c)

    branches = self.branchmap()
    for name in names:
        self.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)
        if name in branches:
            # legal, but confusing: a tag shadowing a branch name
            self.ui.warn(_("warning: tag %s conflicts with existing"
                           " branch name\n") % name)

    def writetags(fp, names, munge, prevtags):
        # append tag lines at EOF; munge converts the name encoding
        # (None for localtags, encoding.fromlocal for .hgtags)
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            m = munge and munge(name) or name
            if self._tagtypes and name in self._tagtypes:
                # retagging: first record the old node so readers can
                # reconstruct the tag's history
                old = self._tags.get(name, nullid)
                fp.write('%s %s\n' % (hex(old), m))
            fp.write('%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = self.opener('localtags', 'r+')
        except IOError:
            fp = self.opener('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)
        # local tags need no commit; done
        return

    try:
        fp = self.wfile('.hgtags', 'rb+')
    except IOError:
        fp = self.wfile('.hgtags', 'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    if '.hgtags' not in self.dirstate:
        self[None].add(['.hgtags'])

    # commit only the .hgtags change, nothing else from the wd
    m = matchmod.exact(self.root, '', ['.hgtags'])
    tagnode = self.commit(message, user, date, extra=extra, match=m)

    for name in names:
        self.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
277
277
def tag(self, names, node, message, local, user, date):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    # refuse to tag while .hgtags has uncommitted modifications in any
    # of the status buckets (modified/added/removed/deleted/unknown)
    for x in self.status()[:5]:
        if '.hgtags' in x:
            raise util.Abort(_('working copy of .hgtags is changed '
                               '(please commit .hgtags manually)'))

    self.tags() # instantiate the cache
    self._tag(names, node, message, local, user, date)
306
306
def tags(self):
    '''return a mapping of tag to node'''
    if self._tags is None:
        # fill both caches in one pass
        (self._tags, self._tagtypes) = self._findtags()

    return self._tags

def _findtags(self):
    '''Do the hard work of finding tags.  Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object.'''

    # XXX what tagtype should subclasses/extensions use?  Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type?  Should there
    # be one tagtype for all such "virtual" tags?  Or is the status
    # quo fine?

    alltags = {}                    # map tag name to (node, hist)
    tagtypes = {}

    tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts.  Have to re-encode tag names because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), but the rest of Mercurial wants them in
    # local encoding.
    tags = {}
    for (name, (node, hist)) in alltags.iteritems():
        if node != nullid:
            # a nullid entry means the tag was deleted
            tags[encoding.tolocal(name)] = node
    tags['tip'] = self.changelog.tip()
    tagtypes = dict([(encoding.tolocal(name), value)
                     for (name, value) in tagtypes.iteritems()])
    return (tags, tagtypes)

def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    '''

    # force the tag caches to be populated
    self.tags()

    return self._tagtypes.get(tagname)

def tagslist(self):
    '''return a list of tags ordered by revision'''
    l = []
    for t, n in self.tags().iteritems():
        try:
            r = self.changelog.rev(n)
        except:
            r = -2 # sort to the beginning of the list if unknown
        l.append((r, t, n))
    return [(t, n) for r, t, n in sorted(l)]

def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self.nodetagscache:
        # build the reverse mapping node -> [tags] once, lazily
        self.nodetagscache = {}
        for t, n in self.tags().iteritems():
            self.nodetagscache.setdefault(n, []).append(t)
        for tags in self.nodetagscache.itervalues():
            tags.sort()
    return self.nodetagscache.get(node, [])
380
380
def _branchtags(self, partial, lrev):
    # TODO: rename this function?
    # bring the (possibly stale) cache `partial` up to date with any
    # revisions newer than lrev, then persist it
    tiprev = len(self) - 1
    if lrev != tiprev:
        ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
        self._updatebranchcache(partial, ctxgen)
        self._writebranchcache(partial, self.changelog.tip(), tiprev)

    return partial

def updatebranchcache(self):
    """Ensure self._branchcache reflects the current changelog tip."""
    tip = self.changelog.tip()
    if self._branchcache is not None and self._branchcachetip == tip:
        # cache already current
        return self._branchcache

    oldtip = self._branchcachetip
    self._branchcachetip = tip
    if oldtip is None or oldtip not in self.changelog.nodemap:
        # no usable in-memory cache (e.g. after a strip): reload from disk
        partial, last, lrev = self._readbranchcache()
    else:
        lrev = self.changelog.rev(oldtip)
        partial = self._branchcache

    self._branchtags(partial, lrev)
    # this private cache holds all heads (not just tips)
    self._branchcache = partial

def branchmap(self):
    '''returns a dictionary {branch: [branchheads]}'''
    self.updatebranchcache()
    return self._branchcache

def branchtags(self):
    '''return a dict where branch names map to the tipmost head of
    the branch, open heads come before closed'''
    bt = {}
    for bn, heads in self.branchmap().iteritems():
        tip = heads[-1]
        # prefer the newest head that is not marked closed
        for h in reversed(heads):
            if 'close' not in self.changelog.read(h)[5]:
                tip = h
                break
        bt[bn] = tip
    return bt
425
425
426
426
def _readbranchcache(self):
    """Load .hg/branchheads.cache from disk.

    Returns (partial, last, lrev) where partial maps branch name to a
    list of head nodes, and last/lrev identify the tip the cache was
    valid for. On any problem the cache is treated as empty.
    """
    partial = {}
    try:
        f = self.opener("branchheads.cache")
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return {}, nullid, nullrev

    try:
        # first line: "<tiphex> <tiprev>"
        last, lrev = lines.pop(0).split(" ", 1)
        last, lrev = bin(last), int(lrev)
        if lrev >= len(self) or self[lrev].node() != last:
            # invalidate the cache
            raise ValueError('invalidating branch cache (tip differs)')
        # remaining lines: "<nodehex> <branchname>"
        for l in lines:
            if not l:
                continue
            node, label = l.split(" ", 1)
            partial.setdefault(label.strip(), []).append(bin(node))
    except KeyboardInterrupt:
        raise
    except Exception, inst:
        # a corrupt cache is not fatal; fall back to an empty one
        if self.ui.debugflag:
            self.ui.warn(str(inst), '\n')
        partial, last, lrev = {}, nullid, nullrev
    return partial, last, lrev

def _writebranchcache(self, branches, tip, tiprev):
    """Atomically write the branch-head cache; failure is non-fatal
    (the cache will simply be rebuilt next time)."""
    try:
        f = self.opener("branchheads.cache", "w", atomictemp=True)
        f.write("%s %s\n" % (hex(tip), tiprev))
        for label, nodes in branches.iteritems():
            for node in nodes:
                f.write("%s %s\n" % (hex(node), label))
        f.rename()
    except (IOError, OSError):
        pass

def _updatebranchcache(self, partial, ctxgen):
    """Fold the changesets produced by ctxgen into the branch-head map
    `partial` (mutated in place), pruning heads made obsolete by the
    new changesets."""
    # collect new branch entries
    newbranches = {}
    for c in ctxgen:
        newbranches.setdefault(c.branch(), []).append(c.node())
    # if older branchheads are reachable from new ones, they aren't
    # really branchheads. Note checking parents is insufficient:
    # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
    for branch, newnodes in newbranches.iteritems():
        bheads = partial.setdefault(branch, [])
        bheads.extend(newnodes)
        if len(bheads) <= 1:
            continue
        # starting from tip means fewer passes over reachable
        while newnodes:
            latest = newnodes.pop()
            if latest not in bheads:
                continue
            # bound the reachability walk at the oldest current head
            minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
            reachable = self.changelog.reachable(latest, minbhrev)
            reachable.remove(latest)
            bheads = [b for b in bheads if b not in reachable]
        partial[branch] = bheads
489
489
def lookup(self, key):
    """Resolve key (rev number, '.', 'null', 'tip', node, tag, branch
    name, or unambiguous node prefix) to a binary changelog node.

    Raises error.RepoLookupError if nothing matches, or error.Abort if
    key is a dirstate parent missing from the changelog (damaged repo).
    """
    if isinstance(key, int):
        return self.changelog.node(key)
    elif key == '.':
        # first parent of the working directory
        return self.dirstate.parents()[0]
    elif key == 'null':
        return nullid
    elif key == 'tip':
        return self.changelog.tip()
    # full binary node or full hex node
    n = self.changelog._match(key)
    if n:
        return n
    if key in self.tags():
        return self.tags()[key]
    if key in self.branchtags():
        return self.branchtags()[key]
    # unambiguous hex prefix
    n = self.changelog._partialmatch(key)
    if n:
        return n

    # can't find key, check if it might have come from damaged dirstate
    if key in self.dirstate.parents():
        raise error.Abort(_("working directory has unknown parent '%s'!")
                          % short(key))
    try:
        # make binary nodes printable in the error message
        if len(key) == 20:
            key = hex(key)
    except:
        pass
    raise error.RepoLookupError(_("unknown revision '%s'") % key)
520
520
521 def lookupbranch(self, key, remote=None):
521 def lookupbranch(self, key, remote=None):
522 repo = remote or self
522 repo = remote or self
523 if key in repo.branchmap():
523 if key in repo.branchmap():
524 return key
524 return key
525
525
526 repo = (remote and remote.local()) and remote or self
526 repo = (remote and remote.local()) and remote or self
527 return repo[key].branch()
527 return repo[key].branch()
528
528
529 def local(self):
529 def local(self):
530 return True
530 return True
531
531
    def join(self, f):
        """Return f joined under self.path (presumably the .hg
        directory - confirm against the constructor, not visible here)."""
        return os.path.join(self.path, f)
534
534
    def wjoin(self, f):
        """Return f joined under self.root (the working directory root)."""
        return os.path.join(self.root, f)
537
537
    def file(self, f):
        """Return the filelog (per-file revision log) for tracked file f.

        A single leading '/' is stripped so store paths stay relative.
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
542
542
    def changectx(self, changeid):
        """Return the changectx for changeid (thin wrapper around
        self[changeid])."""
        return self[changeid]
545
545
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None selects the working directory context
        return self[changeid].parents()
549
549
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
554
554
    def getcwd(self):
        """Return the current working directory as the dirstate sees it
        (relative to the repo root)."""
        return self.dirstate.getcwd()
557
557
    def pathto(self, f, cwd=None):
        """Return repo-tracked path f expressed relative to cwd
        (delegates to the dirstate)."""
        return self.dirstate.pathto(f, cwd)
560
560
    def wfile(self, f, mode='r'):
        """Open working-directory file f via the working-dir opener."""
        return self.wopener(f, mode)
563
563
    def _link(self, f):
        """Return True if working-directory file f is a symlink."""
        return os.path.islink(self.wjoin(f))
566
566
567 def _loadfilter(self, filter):
567 def _loadfilter(self, filter):
568 if filter not in self.filterpats:
568 if filter not in self.filterpats:
569 l = []
569 l = []
570 for pat, cmd in self.ui.configitems(filter):
570 for pat, cmd in self.ui.configitems(filter):
571 if cmd == '!':
571 if cmd == '!':
572 continue
572 continue
573 mf = matchmod.match(self.root, '', [pat])
573 mf = matchmod.match(self.root, '', [pat])
574 fn = None
574 fn = None
575 params = cmd
575 params = cmd
576 for name, filterfn in self._datafilters.iteritems():
576 for name, filterfn in self._datafilters.iteritems():
577 if cmd.startswith(name):
577 if cmd.startswith(name):
578 fn = filterfn
578 fn = filterfn
579 params = cmd[len(name):].lstrip()
579 params = cmd[len(name):].lstrip()
580 break
580 break
581 if not fn:
581 if not fn:
582 fn = lambda s, c, **kwargs: util.filter(s, c)
582 fn = lambda s, c, **kwargs: util.filter(s, c)
583 # Wrap old filters not supporting keyword arguments
583 # Wrap old filters not supporting keyword arguments
584 if not inspect.getargspec(fn)[2]:
584 if not inspect.getargspec(fn)[2]:
585 oldfn = fn
585 oldfn = fn
586 fn = lambda s, c, **kwargs: oldfn(s, c)
586 fn = lambda s, c, **kwargs: oldfn(s, c)
587 l.append((mf, fn, params))
587 l.append((mf, fn, params))
588 self.filterpats[filter] = l
588 self.filterpats[filter] = l
589 return self.filterpats[filter]
589 return self.filterpats[filter]
590
590
591 def _filter(self, filterpats, filename, data):
591 def _filter(self, filterpats, filename, data):
592 for mf, fn, cmd in filterpats:
592 for mf, fn, cmd in filterpats:
593 if mf(filename):
593 if mf(filename):
594 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
594 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
595 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
595 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
596 break
596 break
597
597
598 return data
598 return data
599
599
    @propertycache
    def _encodefilterpats(self):
        # lazily-loaded, cached [encode] filter patterns (see _loadfilter)
        return self._loadfilter('encode')
603
603
    @propertycache
    def _decodefilterpats(self):
        # lazily-loaded, cached [decode] filter patterns (see _loadfilter)
        return self._loadfilter('decode')
607
607
    def adddatafilter(self, name, filter):
        """Register a named Python data filter; _loadfilter dispatches
        filter commands starting with this name to it."""
        self._datafilters[name] = filter
610
610
    def wread(self, filename):
        """Read filename from the working directory (symlink target for
        symlinks) and return it run through the [encode] filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter(self._encodefilterpats, filename, data)
617
617
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory after
        applying the [decode] filters.

        flags: 'l' creates a symlink whose target is data, 'x' marks
        the written file executable.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        try:
            # remove any existing file/symlink first so the new write
            # replaces it regardless of its previous type
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
630
630
    def wwritedata(self, filename, data):
        """Return data run through the [decode] filters for filename,
        without writing anything to disk."""
        return self._filter(self._decodefilterpats, filename, data)
633
633
    def transaction(self, desc):
        """Open a store transaction described by desc and return it.

        If a transaction is already running, a nested transaction is
        returned instead.  Aborts if an abandoned journal from a crashed
        transaction exists (run 'hg recover' first).
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # join the enclosing transaction
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repo) - save an empty one
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on successful close the journal.* files become undo.* so a
        # later rollback can restore this pre-transaction state
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weak reference: an unreferenced transaction may be collected
        self._transref = weakref.ref(tr)
        return tr
663
663
    def recover(self):
        """Replay an interrupted transaction's journal, restoring the
        store to its pre-transaction state.

        Returns True if a journal was found and rolled back, False
        otherwise.  Takes the store lock for the duration.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop caches that may reference undone revisions
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
678
678
    def rollback(self, dryrun=False):
        """Undo the last transaction using the saved undo.* files.

        Restores the store, dirstate and branch to their pre-transaction
        state.  With dryrun=True only the description of what would be
        rolled back is printed.  Returns 1 if there is no rollback
        information; None otherwise.
        """
        wlock = lock = None
        try:
            # wlock before lock: dirstate is restored along with the store
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc holds "<len(repo)>\n<desc>..." from the
                    # transaction that is being undone
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing: keep current branch, just warn
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                # notify caches (e.g. tag cache) that history was destroyed
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
717
717
    def invalidatecaches(self):
        """Forget all in-memory caches derived from history (tags, tag
        types, node->tag map, branch cache) so they are recomputed on
        next access."""
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None
724
724
725 def invalidate(self):
725 def invalidate(self):
726 for a in "changelog manifest".split():
726 for a in "changelog manifest".split():
727 if a in self.__dict__:
727 if a in self.__dict__:
728 delattr(self, a)
728 delattr(self, a)
729 self.invalidatecaches()
729 self.invalidatecaches()
730
730
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file lockname and return the lock object.

        With wait=False a held lock raises LockHeld immediately;
        otherwise we warn and retry with the configured ui.timeout.
        acquirefn, if given, runs after the lock is obtained.
        """
        try:
            # timeout 0: fail fast so we can print who holds the lock
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
745
745
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse a still-held lock (re-entrant via its counter)
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        # invalidate on acquire: another process may have changed the store
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
759
759
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse a still-held lock (re-entrant via its counter)
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # write dirstate on release, invalidate it on acquire
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
774
774
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node for the file (or the unchanged
        first-parent node), appending the filename to changelist when
        the file actually changed.  manifest1/manifest2 are the parents'
        manifests; tr is the open transaction.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                # copy source not found anywhere: record nothing, warn
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
849
854
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None if there was nothing to
        commit.  Raises util.Abort on partial merge commits, unresolved
        merge conflicts, or unmatched explicit files.
        """

        def fail(f, msg):
            # abort on files explicitly named but not committable
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated below; unmatched explicit files abort
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit (and no forced/closing/merge/branch-change
            # commit): bail out early
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # deliberately bare: point at the saved message for ANY
                # failure, then re-raise unchanged
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
977
982
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the new changelog node.  With error=True, IOErrors while
        reading files are fatal instead of treating the file as removed.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # proxy so helpers cannot keep the transaction alive
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # missing file: treat as removed in this commit
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # expose pending data to pretxncommit hooks via HG_PENDING
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1045
1050
1046 def destroyed(self):
1051 def destroyed(self):
1047 '''Inform the repository that nodes have been destroyed.
1052 '''Inform the repository that nodes have been destroyed.
1048 Intended for use by strip and rollback, so there's a common
1053 Intended for use by strip and rollback, so there's a common
1049 place for anything that has to be done after destroying history.'''
1054 place for anything that has to be done after destroying history.'''
1050 # XXX it might be nice if we could take the list of destroyed
1055 # XXX it might be nice if we could take the list of destroyed
1051 # nodes, but I don't see an easy way for rollback() to do that
1056 # nodes, but I don't see an easy way for rollback() to do that
1052
1057
1053 # Ensure the persistent tag cache is updated. Doing it now
1058 # Ensure the persistent tag cache is updated. Doing it now
1054 # means that the tag cache only has to worry about destroyed
1059 # means that the tag cache only has to worry about destroyed
1055 # heads immediately after a strip/rollback. That in turn
1060 # heads immediately after a strip/rollback. That in turn
1056 # guarantees that "cachetip == currenttip" (comparing both rev
1061 # guarantees that "cachetip == currenttip" (comparing both rev
1057 # and node) always means no nodes have been added or destroyed.
1062 # and node) always means no nodes have been added or destroyed.
1058
1063
1059 # XXX this is suboptimal when qrefresh'ing: we strip the current
1064 # XXX this is suboptimal when qrefresh'ing: we strip the current
1060 # head, refresh the tag cache, then immediately add a new head.
1065 # head, refresh the tag cache, then immediately add a new head.
1061 # But I think doing it this way is necessary for the "instant
1066 # But I think doing it this way is necessary for the "instant
1062 # tag cache retrieval" case to work.
1067 # tag cache retrieval" case to work.
1063 self.invalidatecaches()
1068 self.invalidatecaches()
1064
1069
1065 def walk(self, match, node=None):
1070 def walk(self, match, node=None):
1066 '''
1071 '''
1067 walk recursively through the directory tree or a given
1072 walk recursively through the directory tree or a given
1068 changeset, finding all files matched by the match
1073 changeset, finding all files matched by the match
1069 function
1074 function
1070 '''
1075 '''
1071 return self[node].walk(match)
1076 return self[node].walk(match)
1072
1077
1073 def status(self, node1='.', node2=None, match=None,
1078 def status(self, node1='.', node2=None, match=None,
1074 ignored=False, clean=False, unknown=False,
1079 ignored=False, clean=False, unknown=False,
1075 listsubrepos=False):
1080 listsubrepos=False):
1076 """return status of files between two nodes or node and working directory
1081 """return status of files between two nodes or node and working directory
1077
1082
1078 If node1 is None, use the first dirstate parent instead.
1083 If node1 is None, use the first dirstate parent instead.
1079 If node2 is None, compare node1 with working directory.
1084 If node2 is None, compare node1 with working directory.
1080 """
1085 """
1081
1086
1082 def mfmatches(ctx):
1087 def mfmatches(ctx):
1083 mf = ctx.manifest().copy()
1088 mf = ctx.manifest().copy()
1084 for fn in mf.keys():
1089 for fn in mf.keys():
1085 if not match(fn):
1090 if not match(fn):
1086 del mf[fn]
1091 del mf[fn]
1087 return mf
1092 return mf
1088
1093
1089 if isinstance(node1, context.changectx):
1094 if isinstance(node1, context.changectx):
1090 ctx1 = node1
1095 ctx1 = node1
1091 else:
1096 else:
1092 ctx1 = self[node1]
1097 ctx1 = self[node1]
1093 if isinstance(node2, context.changectx):
1098 if isinstance(node2, context.changectx):
1094 ctx2 = node2
1099 ctx2 = node2
1095 else:
1100 else:
1096 ctx2 = self[node2]
1101 ctx2 = self[node2]
1097
1102
1098 working = ctx2.rev() is None
1103 working = ctx2.rev() is None
1099 parentworking = working and ctx1 == self['.']
1104 parentworking = working and ctx1 == self['.']
1100 match = match or matchmod.always(self.root, self.getcwd())
1105 match = match or matchmod.always(self.root, self.getcwd())
1101 listignored, listclean, listunknown = ignored, clean, unknown
1106 listignored, listclean, listunknown = ignored, clean, unknown
1102
1107
1103 # load earliest manifest first for caching reasons
1108 # load earliest manifest first for caching reasons
1104 if not working and ctx2.rev() < ctx1.rev():
1109 if not working and ctx2.rev() < ctx1.rev():
1105 ctx2.manifest()
1110 ctx2.manifest()
1106
1111
1107 if not parentworking:
1112 if not parentworking:
1108 def bad(f, msg):
1113 def bad(f, msg):
1109 if f not in ctx1:
1114 if f not in ctx1:
1110 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1115 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1111 match.bad = bad
1116 match.bad = bad
1112
1117
1113 if working: # we need to scan the working dir
1118 if working: # we need to scan the working dir
1114 subrepos = []
1119 subrepos = []
1115 if '.hgsub' in self.dirstate:
1120 if '.hgsub' in self.dirstate:
1116 subrepos = ctx1.substate.keys()
1121 subrepos = ctx1.substate.keys()
1117 s = self.dirstate.status(match, subrepos, listignored,
1122 s = self.dirstate.status(match, subrepos, listignored,
1118 listclean, listunknown)
1123 listclean, listunknown)
1119 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1124 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1120
1125
1121 # check for any possibly clean files
1126 # check for any possibly clean files
1122 if parentworking and cmp:
1127 if parentworking and cmp:
1123 fixup = []
1128 fixup = []
1124 # do a full compare of any files that might have changed
1129 # do a full compare of any files that might have changed
1125 for f in sorted(cmp):
1130 for f in sorted(cmp):
1126 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1131 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1127 or ctx1[f].cmp(ctx2[f])):
1132 or ctx1[f].cmp(ctx2[f])):
1128 modified.append(f)
1133 modified.append(f)
1129 else:
1134 else:
1130 fixup.append(f)
1135 fixup.append(f)
1131
1136
1132 # update dirstate for files that are actually clean
1137 # update dirstate for files that are actually clean
1133 if fixup:
1138 if fixup:
1134 if listclean:
1139 if listclean:
1135 clean += fixup
1140 clean += fixup
1136
1141
1137 try:
1142 try:
1138 # updating the dirstate is optional
1143 # updating the dirstate is optional
1139 # so we don't wait on the lock
1144 # so we don't wait on the lock
1140 wlock = self.wlock(False)
1145 wlock = self.wlock(False)
1141 try:
1146 try:
1142 for f in fixup:
1147 for f in fixup:
1143 self.dirstate.normal(f)
1148 self.dirstate.normal(f)
1144 finally:
1149 finally:
1145 wlock.release()
1150 wlock.release()
1146 except error.LockError:
1151 except error.LockError:
1147 pass
1152 pass
1148
1153
1149 if not parentworking:
1154 if not parentworking:
1150 mf1 = mfmatches(ctx1)
1155 mf1 = mfmatches(ctx1)
1151 if working:
1156 if working:
1152 # we are comparing working dir against non-parent
1157 # we are comparing working dir against non-parent
1153 # generate a pseudo-manifest for the working dir
1158 # generate a pseudo-manifest for the working dir
1154 mf2 = mfmatches(self['.'])
1159 mf2 = mfmatches(self['.'])
1155 for f in cmp + modified + added:
1160 for f in cmp + modified + added:
1156 mf2[f] = None
1161 mf2[f] = None
1157 mf2.set(f, ctx2.flags(f))
1162 mf2.set(f, ctx2.flags(f))
1158 for f in removed:
1163 for f in removed:
1159 if f in mf2:
1164 if f in mf2:
1160 del mf2[f]
1165 del mf2[f]
1161 else:
1166 else:
1162 # we are comparing two revisions
1167 # we are comparing two revisions
1163 deleted, unknown, ignored = [], [], []
1168 deleted, unknown, ignored = [], [], []
1164 mf2 = mfmatches(ctx2)
1169 mf2 = mfmatches(ctx2)
1165
1170
1166 modified, added, clean = [], [], []
1171 modified, added, clean = [], [], []
1167 for fn in mf2:
1172 for fn in mf2:
1168 if fn in mf1:
1173 if fn in mf1:
1169 if (mf1.flags(fn) != mf2.flags(fn) or
1174 if (mf1.flags(fn) != mf2.flags(fn) or
1170 (mf1[fn] != mf2[fn] and
1175 (mf1[fn] != mf2[fn] and
1171 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1176 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1172 modified.append(fn)
1177 modified.append(fn)
1173 elif listclean:
1178 elif listclean:
1174 clean.append(fn)
1179 clean.append(fn)
1175 del mf1[fn]
1180 del mf1[fn]
1176 else:
1181 else:
1177 added.append(fn)
1182 added.append(fn)
1178 removed = mf1.keys()
1183 removed = mf1.keys()
1179
1184
1180 r = modified, added, removed, deleted, unknown, ignored, clean
1185 r = modified, added, removed, deleted, unknown, ignored, clean
1181
1186
1182 if listsubrepos:
1187 if listsubrepos:
1183 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1188 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1184 if working:
1189 if working:
1185 rev2 = None
1190 rev2 = None
1186 else:
1191 else:
1187 rev2 = ctx2.substate[subpath][1]
1192 rev2 = ctx2.substate[subpath][1]
1188 try:
1193 try:
1189 submatch = matchmod.narrowmatcher(subpath, match)
1194 submatch = matchmod.narrowmatcher(subpath, match)
1190 s = sub.status(rev2, match=submatch, ignored=listignored,
1195 s = sub.status(rev2, match=submatch, ignored=listignored,
1191 clean=listclean, unknown=listunknown,
1196 clean=listclean, unknown=listunknown,
1192 listsubrepos=True)
1197 listsubrepos=True)
1193 for rfiles, sfiles in zip(r, s):
1198 for rfiles, sfiles in zip(r, s):
1194 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1199 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1195 except error.LookupError:
1200 except error.LookupError:
1196 self.ui.status(_("skipping missing subrepository: %s\n")
1201 self.ui.status(_("skipping missing subrepository: %s\n")
1197 % subpath)
1202 % subpath)
1198
1203
1199 [l.sort() for l in r]
1204 [l.sort() for l in r]
1200 return r
1205 return r
1201
1206
1202 def heads(self, start=None):
1207 def heads(self, start=None):
1203 heads = self.changelog.heads(start)
1208 heads = self.changelog.heads(start)
1204 # sort the output in rev descending order
1209 # sort the output in rev descending order
1205 return sorted(heads, key=self.changelog.rev, reverse=True)
1210 return sorted(heads, key=self.changelog.rev, reverse=True)
1206
1211
1207 def branchheads(self, branch=None, start=None, closed=False):
1212 def branchheads(self, branch=None, start=None, closed=False):
1208 '''return a (possibly filtered) list of heads for the given branch
1213 '''return a (possibly filtered) list of heads for the given branch
1209
1214
1210 Heads are returned in topological order, from newest to oldest.
1215 Heads are returned in topological order, from newest to oldest.
1211 If branch is None, use the dirstate branch.
1216 If branch is None, use the dirstate branch.
1212 If start is not None, return only heads reachable from start.
1217 If start is not None, return only heads reachable from start.
1213 If closed is True, return heads that are marked as closed as well.
1218 If closed is True, return heads that are marked as closed as well.
1214 '''
1219 '''
1215 if branch is None:
1220 if branch is None:
1216 branch = self[None].branch()
1221 branch = self[None].branch()
1217 branches = self.branchmap()
1222 branches = self.branchmap()
1218 if branch not in branches:
1223 if branch not in branches:
1219 return []
1224 return []
1220 # the cache returns heads ordered lowest to highest
1225 # the cache returns heads ordered lowest to highest
1221 bheads = list(reversed(branches[branch]))
1226 bheads = list(reversed(branches[branch]))
1222 if start is not None:
1227 if start is not None:
1223 # filter out the heads that cannot be reached from startrev
1228 # filter out the heads that cannot be reached from startrev
1224 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1229 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1225 bheads = [h for h in bheads if h in fbheads]
1230 bheads = [h for h in bheads if h in fbheads]
1226 if not closed:
1231 if not closed:
1227 bheads = [h for h in bheads if
1232 bheads = [h for h in bheads if
1228 ('close' not in self.changelog.read(h)[5])]
1233 ('close' not in self.changelog.read(h)[5])]
1229 return bheads
1234 return bheads
1230
1235
1231 def branches(self, nodes):
1236 def branches(self, nodes):
1232 if not nodes:
1237 if not nodes:
1233 nodes = [self.changelog.tip()]
1238 nodes = [self.changelog.tip()]
1234 b = []
1239 b = []
1235 for n in nodes:
1240 for n in nodes:
1236 t = n
1241 t = n
1237 while 1:
1242 while 1:
1238 p = self.changelog.parents(n)
1243 p = self.changelog.parents(n)
1239 if p[1] != nullid or p[0] == nullid:
1244 if p[1] != nullid or p[0] == nullid:
1240 b.append((t, n, p[0], p[1]))
1245 b.append((t, n, p[0], p[1]))
1241 break
1246 break
1242 n = p[0]
1247 n = p[0]
1243 return b
1248 return b
1244
1249
1245 def between(self, pairs):
1250 def between(self, pairs):
1246 r = []
1251 r = []
1247
1252
1248 for top, bottom in pairs:
1253 for top, bottom in pairs:
1249 n, l, i = top, [], 0
1254 n, l, i = top, [], 0
1250 f = 1
1255 f = 1
1251
1256
1252 while n != bottom and n != nullid:
1257 while n != bottom and n != nullid:
1253 p = self.changelog.parents(n)[0]
1258 p = self.changelog.parents(n)[0]
1254 if i == f:
1259 if i == f:
1255 l.append(n)
1260 l.append(n)
1256 f = f * 2
1261 f = f * 2
1257 n = p
1262 n = p
1258 i += 1
1263 i += 1
1259
1264
1260 r.append(l)
1265 r.append(l)
1261
1266
1262 return r
1267 return r
1263
1268
1264 def pull(self, remote, heads=None, force=False):
1269 def pull(self, remote, heads=None, force=False):
1265 lock = self.lock()
1270 lock = self.lock()
1266 try:
1271 try:
1267 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1272 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1268 force=force)
1273 force=force)
1269 common, fetch, rheads = tmp
1274 common, fetch, rheads = tmp
1270 if not fetch:
1275 if not fetch:
1271 self.ui.status(_("no changes found\n"))
1276 self.ui.status(_("no changes found\n"))
1272 return 0
1277 return 0
1273
1278
1274 if heads is None and fetch == [nullid]:
1279 if heads is None and fetch == [nullid]:
1275 self.ui.status(_("requesting all changes\n"))
1280 self.ui.status(_("requesting all changes\n"))
1276 elif heads is None and remote.capable('changegroupsubset'):
1281 elif heads is None and remote.capable('changegroupsubset'):
1277 # issue1320, avoid a race if remote changed after discovery
1282 # issue1320, avoid a race if remote changed after discovery
1278 heads = rheads
1283 heads = rheads
1279
1284
1280 if heads is None:
1285 if heads is None:
1281 cg = remote.changegroup(fetch, 'pull')
1286 cg = remote.changegroup(fetch, 'pull')
1282 else:
1287 else:
1283 if not remote.capable('changegroupsubset'):
1288 if not remote.capable('changegroupsubset'):
1284 raise util.Abort(_("partial pull cannot be done because "
1289 raise util.Abort(_("partial pull cannot be done because "
1285 "other repository doesn't support "
1290 "other repository doesn't support "
1286 "changegroupsubset."))
1291 "changegroupsubset."))
1287 cg = remote.changegroupsubset(fetch, heads, 'pull')
1292 cg = remote.changegroupsubset(fetch, heads, 'pull')
1288 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1293 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1289 finally:
1294 finally:
1290 lock.release()
1295 lock.release()
1291
1296
1292 def push(self, remote, force=False, revs=None, newbranch=False):
1297 def push(self, remote, force=False, revs=None, newbranch=False):
1293 '''Push outgoing changesets (limited by revs) from the current
1298 '''Push outgoing changesets (limited by revs) from the current
1294 repository to remote. Return an integer:
1299 repository to remote. Return an integer:
1295 - 0 means HTTP error *or* nothing to push
1300 - 0 means HTTP error *or* nothing to push
1296 - 1 means we pushed and remote head count is unchanged *or*
1301 - 1 means we pushed and remote head count is unchanged *or*
1297 we have outgoing changesets but refused to push
1302 we have outgoing changesets but refused to push
1298 - other values as described by addchangegroup()
1303 - other values as described by addchangegroup()
1299 '''
1304 '''
1300 # there are two ways to push to remote repo:
1305 # there are two ways to push to remote repo:
1301 #
1306 #
1302 # addchangegroup assumes local user can lock remote
1307 # addchangegroup assumes local user can lock remote
1303 # repo (local filesystem, old ssh servers).
1308 # repo (local filesystem, old ssh servers).
1304 #
1309 #
1305 # unbundle assumes local user cannot lock remote repo (new ssh
1310 # unbundle assumes local user cannot lock remote repo (new ssh
1306 # servers, http servers).
1311 # servers, http servers).
1307
1312
1308 lock = None
1313 lock = None
1309 unbundle = remote.capable('unbundle')
1314 unbundle = remote.capable('unbundle')
1310 if not unbundle:
1315 if not unbundle:
1311 lock = remote.lock()
1316 lock = remote.lock()
1312 try:
1317 try:
1313 ret = discovery.prepush(self, remote, force, revs, newbranch)
1318 ret = discovery.prepush(self, remote, force, revs, newbranch)
1314 if ret[0] is None:
1319 if ret[0] is None:
1315 # and here we return 0 for "nothing to push" or 1 for
1320 # and here we return 0 for "nothing to push" or 1 for
1316 # "something to push but I refuse"
1321 # "something to push but I refuse"
1317 return ret[1]
1322 return ret[1]
1318
1323
1319 cg, remote_heads = ret
1324 cg, remote_heads = ret
1320 if unbundle:
1325 if unbundle:
1321 # local repo finds heads on server, finds out what revs it must
1326 # local repo finds heads on server, finds out what revs it must
1322 # push. once revs transferred, if server finds it has
1327 # push. once revs transferred, if server finds it has
1323 # different heads (someone else won commit/push race), server
1328 # different heads (someone else won commit/push race), server
1324 # aborts.
1329 # aborts.
1325 if force:
1330 if force:
1326 remote_heads = ['force']
1331 remote_heads = ['force']
1327 # ssh: return remote's addchangegroup()
1332 # ssh: return remote's addchangegroup()
1328 # http: return remote's addchangegroup() or 0 for error
1333 # http: return remote's addchangegroup() or 0 for error
1329 return remote.unbundle(cg, remote_heads, 'push')
1334 return remote.unbundle(cg, remote_heads, 'push')
1330 else:
1335 else:
1331 # we return an integer indicating remote head count change
1336 # we return an integer indicating remote head count change
1332 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1337 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1333 finally:
1338 finally:
1334 if lock is not None:
1339 if lock is not None:
1335 lock.release()
1340 lock.release()
1336
1341
1337 def changegroupinfo(self, nodes, source):
1342 def changegroupinfo(self, nodes, source):
1338 if self.ui.verbose or source == 'bundle':
1343 if self.ui.verbose or source == 'bundle':
1339 self.ui.status(_("%d changesets found\n") % len(nodes))
1344 self.ui.status(_("%d changesets found\n") % len(nodes))
1340 if self.ui.debugflag:
1345 if self.ui.debugflag:
1341 self.ui.debug("list of changesets:\n")
1346 self.ui.debug("list of changesets:\n")
1342 for node in nodes:
1347 for node in nodes:
1343 self.ui.debug("%s\n" % hex(node))
1348 self.ui.debug("%s\n" % hex(node))
1344
1349
1345 def changegroupsubset(self, bases, heads, source, extranodes=None):
1350 def changegroupsubset(self, bases, heads, source, extranodes=None):
1346 """Compute a changegroup consisting of all the nodes that are
1351 """Compute a changegroup consisting of all the nodes that are
1347 descendents of any of the bases and ancestors of any of the heads.
1352 descendents of any of the bases and ancestors of any of the heads.
1348 Return a chunkbuffer object whose read() method will return
1353 Return a chunkbuffer object whose read() method will return
1349 successive changegroup chunks.
1354 successive changegroup chunks.
1350
1355
1351 It is fairly complex as determining which filenodes and which
1356 It is fairly complex as determining which filenodes and which
1352 manifest nodes need to be included for the changeset to be complete
1357 manifest nodes need to be included for the changeset to be complete
1353 is non-trivial.
1358 is non-trivial.
1354
1359
1355 Another wrinkle is doing the reverse, figuring out which changeset in
1360 Another wrinkle is doing the reverse, figuring out which changeset in
1356 the changegroup a particular filenode or manifestnode belongs to.
1361 the changegroup a particular filenode or manifestnode belongs to.
1357
1362
1358 The caller can specify some nodes that must be included in the
1363 The caller can specify some nodes that must be included in the
1359 changegroup using the extranodes argument. It should be a dict
1364 changegroup using the extranodes argument. It should be a dict
1360 where the keys are the filenames (or 1 for the manifest), and the
1365 where the keys are the filenames (or 1 for the manifest), and the
1361 values are lists of (node, linknode) tuples, where node is a wanted
1366 values are lists of (node, linknode) tuples, where node is a wanted
1362 node and linknode is the changelog node that should be transmitted as
1367 node and linknode is the changelog node that should be transmitted as
1363 the linkrev.
1368 the linkrev.
1364 """
1369 """
1365
1370
1366 # Set up some initial variables
1371 # Set up some initial variables
1367 # Make it easy to refer to self.changelog
1372 # Make it easy to refer to self.changelog
1368 cl = self.changelog
1373 cl = self.changelog
1369 # Compute the list of changesets in this changegroup.
1374 # Compute the list of changesets in this changegroup.
1370 # Some bases may turn out to be superfluous, and some heads may be
1375 # Some bases may turn out to be superfluous, and some heads may be
1371 # too. nodesbetween will return the minimal set of bases and heads
1376 # too. nodesbetween will return the minimal set of bases and heads
1372 # necessary to re-create the changegroup.
1377 # necessary to re-create the changegroup.
1373 if not bases:
1378 if not bases:
1374 bases = [nullid]
1379 bases = [nullid]
1375 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1380 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1376
1381
1377 if extranodes is None:
1382 if extranodes is None:
1378 # can we go through the fast path ?
1383 # can we go through the fast path ?
1379 heads.sort()
1384 heads.sort()
1380 allheads = self.heads()
1385 allheads = self.heads()
1381 allheads.sort()
1386 allheads.sort()
1382 if heads == allheads:
1387 if heads == allheads:
1383 return self._changegroup(msng_cl_lst, source)
1388 return self._changegroup(msng_cl_lst, source)
1384
1389
1385 # slow path
1390 # slow path
1386 self.hook('preoutgoing', throw=True, source=source)
1391 self.hook('preoutgoing', throw=True, source=source)
1387
1392
1388 self.changegroupinfo(msng_cl_lst, source)
1393 self.changegroupinfo(msng_cl_lst, source)
1389
1394
1390 # We assume that all ancestors of bases are known
1395 # We assume that all ancestors of bases are known
1391 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1396 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1392
1397
1393 # Make it easy to refer to self.manifest
1398 # Make it easy to refer to self.manifest
1394 mnfst = self.manifest
1399 mnfst = self.manifest
1395 # We don't know which manifests are missing yet
1400 # We don't know which manifests are missing yet
1396 msng_mnfst_set = {}
1401 msng_mnfst_set = {}
1397 # Nor do we know which filenodes are missing.
1402 # Nor do we know which filenodes are missing.
1398 msng_filenode_set = {}
1403 msng_filenode_set = {}
1399
1404
1400 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1405 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1401 junk = None
1406 junk = None
1402
1407
1403 # A changeset always belongs to itself, so the changenode lookup
1408 # A changeset always belongs to itself, so the changenode lookup
1404 # function for a changenode is identity.
1409 # function for a changenode is identity.
1405 def identity(x):
1410 def identity(x):
1406 return x
1411 return x
1407
1412
1408 # A function generating function that sets up the initial environment
1413 # A function generating function that sets up the initial environment
1409 # the inner function.
1414 # the inner function.
1410 def filenode_collector(changedfiles):
1415 def filenode_collector(changedfiles):
1411 # This gathers information from each manifestnode included in the
1416 # This gathers information from each manifestnode included in the
1412 # changegroup about which filenodes the manifest node references
1417 # changegroup about which filenodes the manifest node references
1413 # so we can include those in the changegroup too.
1418 # so we can include those in the changegroup too.
1414 #
1419 #
1415 # It also remembers which changenode each filenode belongs to. It
1420 # It also remembers which changenode each filenode belongs to. It
1416 # does this by assuming the a filenode belongs to the changenode
1421 # does this by assuming the a filenode belongs to the changenode
1417 # the first manifest that references it belongs to.
1422 # the first manifest that references it belongs to.
1418 def collect_msng_filenodes(mnfstnode):
1423 def collect_msng_filenodes(mnfstnode):
1419 r = mnfst.rev(mnfstnode)
1424 r = mnfst.rev(mnfstnode)
1420 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1425 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1421 # If the previous rev is one of the parents,
1426 # If the previous rev is one of the parents,
1422 # we only need to see a diff.
1427 # we only need to see a diff.
1423 deltamf = mnfst.readdelta(mnfstnode)
1428 deltamf = mnfst.readdelta(mnfstnode)
1424 # For each line in the delta
1429 # For each line in the delta
1425 for f, fnode in deltamf.iteritems():
1430 for f, fnode in deltamf.iteritems():
1426 # And if the file is in the list of files we care
1431 # And if the file is in the list of files we care
1427 # about.
1432 # about.
1428 if f in changedfiles:
1433 if f in changedfiles:
1429 # Get the changenode this manifest belongs to
1434 # Get the changenode this manifest belongs to
1430 clnode = msng_mnfst_set[mnfstnode]
1435 clnode = msng_mnfst_set[mnfstnode]
1431 # Create the set of filenodes for the file if
1436 # Create the set of filenodes for the file if
1432 # there isn't one already.
1437 # there isn't one already.
1433 ndset = msng_filenode_set.setdefault(f, {})
1438 ndset = msng_filenode_set.setdefault(f, {})
1434 # And set the filenode's changelog node to the
1439 # And set the filenode's changelog node to the
1435 # manifest's if it hasn't been set already.
1440 # manifest's if it hasn't been set already.
1436 ndset.setdefault(fnode, clnode)
1441 ndset.setdefault(fnode, clnode)
1437 else:
1442 else:
1438 # Otherwise we need a full manifest.
1443 # Otherwise we need a full manifest.
1439 m = mnfst.read(mnfstnode)
1444 m = mnfst.read(mnfstnode)
1440 # For every file in we care about.
1445 # For every file in we care about.
1441 for f in changedfiles:
1446 for f in changedfiles:
1442 fnode = m.get(f, None)
1447 fnode = m.get(f, None)
1443 # If it's in the manifest
1448 # If it's in the manifest
1444 if fnode is not None:
1449 if fnode is not None:
1445 # See comments above.
1450 # See comments above.
1446 clnode = msng_mnfst_set[mnfstnode]
1451 clnode = msng_mnfst_set[mnfstnode]
1447 ndset = msng_filenode_set.setdefault(f, {})
1452 ndset = msng_filenode_set.setdefault(f, {})
1448 ndset.setdefault(fnode, clnode)
1453 ndset.setdefault(fnode, clnode)
1449 return collect_msng_filenodes
1454 return collect_msng_filenodes
1450
1455
1451 # If we determine that a particular file or manifest node must be a
1456 # If we determine that a particular file or manifest node must be a
1452 # node that the recipient of the changegroup will already have, we can
1457 # node that the recipient of the changegroup will already have, we can
1453 # also assume the recipient will have all the parents. This function
1458 # also assume the recipient will have all the parents. This function
1454 # prunes them from the set of missing nodes.
1459 # prunes them from the set of missing nodes.
1455 def prune(revlog, missingnodes):
1460 def prune(revlog, missingnodes):
1456 hasset = set()
1461 hasset = set()
1457 # If a 'missing' filenode thinks it belongs to a changenode we
1462 # If a 'missing' filenode thinks it belongs to a changenode we
1458 # assume the recipient must have, then the recipient must have
1463 # assume the recipient must have, then the recipient must have
1459 # that filenode.
1464 # that filenode.
1460 for n in missingnodes:
1465 for n in missingnodes:
1461 clrev = revlog.linkrev(revlog.rev(n))
1466 clrev = revlog.linkrev(revlog.rev(n))
1462 if clrev in commonrevs:
1467 if clrev in commonrevs:
1463 hasset.add(n)
1468 hasset.add(n)
1464 for n in hasset:
1469 for n in hasset:
1465 missingnodes.pop(n, None)
1470 missingnodes.pop(n, None)
1466 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1471 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1467 missingnodes.pop(revlog.node(r), None)
1472 missingnodes.pop(revlog.node(r), None)
1468
1473
1469 # Add the nodes that were explicitly requested.
1474 # Add the nodes that were explicitly requested.
1470 def add_extra_nodes(name, nodes):
1475 def add_extra_nodes(name, nodes):
1471 if not extranodes or name not in extranodes:
1476 if not extranodes or name not in extranodes:
1472 return
1477 return
1473
1478
1474 for node, linknode in extranodes[name]:
1479 for node, linknode in extranodes[name]:
1475 if node not in nodes:
1480 if node not in nodes:
1476 nodes[node] = linknode
1481 nodes[node] = linknode
1477
1482
1478 # Now that we have all theses utility functions to help out and
1483 # Now that we have all theses utility functions to help out and
1479 # logically divide up the task, generate the group.
1484 # logically divide up the task, generate the group.
1480 def gengroup():
1485 def gengroup():
1481 # The set of changed files starts empty.
1486 # The set of changed files starts empty.
1482 changedfiles = set()
1487 changedfiles = set()
1483 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1488 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1484
1489
1485 # Create a changenode group generator that will call our functions
1490 # Create a changenode group generator that will call our functions
1486 # back to lookup the owning changenode and collect information.
1491 # back to lookup the owning changenode and collect information.
1487 group = cl.group(msng_cl_lst, identity, collect)
1492 group = cl.group(msng_cl_lst, identity, collect)
1488 for cnt, chnk in enumerate(group):
1493 for cnt, chnk in enumerate(group):
1489 yield chnk
1494 yield chnk
1490 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1495 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1491 self.ui.progress(_('bundling changes'), None)
1496 self.ui.progress(_('bundling changes'), None)
1492
1497
1493 prune(mnfst, msng_mnfst_set)
1498 prune(mnfst, msng_mnfst_set)
1494 add_extra_nodes(1, msng_mnfst_set)
1499 add_extra_nodes(1, msng_mnfst_set)
1495 msng_mnfst_lst = msng_mnfst_set.keys()
1500 msng_mnfst_lst = msng_mnfst_set.keys()
1496 # Sort the manifestnodes by revision number.
1501 # Sort the manifestnodes by revision number.
1497 msng_mnfst_lst.sort(key=mnfst.rev)
1502 msng_mnfst_lst.sort(key=mnfst.rev)
1498 # Create a generator for the manifestnodes that calls our lookup
1503 # Create a generator for the manifestnodes that calls our lookup
1499 # and data collection functions back.
1504 # and data collection functions back.
1500 group = mnfst.group(msng_mnfst_lst,
1505 group = mnfst.group(msng_mnfst_lst,
1501 lambda mnode: msng_mnfst_set[mnode],
1506 lambda mnode: msng_mnfst_set[mnode],
1502 filenode_collector(changedfiles))
1507 filenode_collector(changedfiles))
1503 for cnt, chnk in enumerate(group):
1508 for cnt, chnk in enumerate(group):
1504 yield chnk
1509 yield chnk
1505 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1510 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1506 self.ui.progress(_('bundling manifests'), None)
1511 self.ui.progress(_('bundling manifests'), None)
1507
1512
1508 # These are no longer needed, dereference and toss the memory for
1513 # These are no longer needed, dereference and toss the memory for
1509 # them.
1514 # them.
1510 msng_mnfst_lst = None
1515 msng_mnfst_lst = None
1511 msng_mnfst_set.clear()
1516 msng_mnfst_set.clear()
1512
1517
1513 if extranodes:
1518 if extranodes:
1514 for fname in extranodes:
1519 for fname in extranodes:
1515 if isinstance(fname, int):
1520 if isinstance(fname, int):
1516 continue
1521 continue
1517 msng_filenode_set.setdefault(fname, {})
1522 msng_filenode_set.setdefault(fname, {})
1518 changedfiles.add(fname)
1523 changedfiles.add(fname)
1519 # Go through all our files in order sorted by name.
1524 # Go through all our files in order sorted by name.
1520 cnt = 0
1525 cnt = 0
1521 for fname in sorted(changedfiles):
1526 for fname in sorted(changedfiles):
1522 filerevlog = self.file(fname)
1527 filerevlog = self.file(fname)
1523 if not len(filerevlog):
1528 if not len(filerevlog):
1524 raise util.Abort(_("empty or missing revlog for %s") % fname)
1529 raise util.Abort(_("empty or missing revlog for %s") % fname)
1525 # Toss out the filenodes that the recipient isn't really
1530 # Toss out the filenodes that the recipient isn't really
1526 # missing.
1531 # missing.
1527 missingfnodes = msng_filenode_set.pop(fname, {})
1532 missingfnodes = msng_filenode_set.pop(fname, {})
1528 prune(filerevlog, missingfnodes)
1533 prune(filerevlog, missingfnodes)
1529 add_extra_nodes(fname, missingfnodes)
1534 add_extra_nodes(fname, missingfnodes)
1530 # If any filenodes are left, generate the group for them,
1535 # If any filenodes are left, generate the group for them,
1531 # otherwise don't bother.
1536 # otherwise don't bother.
1532 if missingfnodes:
1537 if missingfnodes:
1533 yield changegroup.chunkheader(len(fname))
1538 yield changegroup.chunkheader(len(fname))
1534 yield fname
1539 yield fname
1535 # Sort the filenodes by their revision # (topological order)
1540 # Sort the filenodes by their revision # (topological order)
1536 nodeiter = list(missingfnodes)
1541 nodeiter = list(missingfnodes)
1537 nodeiter.sort(key=filerevlog.rev)
1542 nodeiter.sort(key=filerevlog.rev)
1538 # Create a group generator and only pass in a changenode
1543 # Create a group generator and only pass in a changenode
1539 # lookup function as we need to collect no information
1544 # lookup function as we need to collect no information
1540 # from filenodes.
1545 # from filenodes.
1541 group = filerevlog.group(nodeiter,
1546 group = filerevlog.group(nodeiter,
1542 lambda fnode: missingfnodes[fnode])
1547 lambda fnode: missingfnodes[fnode])
1543 for chnk in group:
1548 for chnk in group:
1544 self.ui.progress(
1549 self.ui.progress(
1545 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1550 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1546 cnt += 1
1551 cnt += 1
1547 yield chnk
1552 yield chnk
1548 # Signal that no more groups are left.
1553 # Signal that no more groups are left.
1549 yield changegroup.closechunk()
1554 yield changegroup.closechunk()
1550 self.ui.progress(_('bundling files'), None)
1555 self.ui.progress(_('bundling files'), None)
1551
1556
1552 if msng_cl_lst:
1557 if msng_cl_lst:
1553 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1558 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1554
1559
1555 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1560 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1556
1561
def changegroup(self, basenodes, source):
    """Return a changegroup of everything not reachable from basenodes.

    Implemented in terms of changegroupsubset() with the current repo
    heads as the target set, which avoids a race (issue1320).
    """
    currentheads = self.heads()
    return self.changegroupsubset(basenodes, currentheads, source)
1560
1565
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    # Give hooks a chance to veto the operation before any work is done.
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # Revision numbers of the outgoing changesets; manifest and file
    # revisions are included iff their linkrev falls in this set.
    revset = set([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    def identity(x):
        # For the changelog itself a node is its own linknode.
        return x

    def gennodelst(log):
        # Yield the nodes of *log* whose linkrev points at one of the
        # outgoing changesets.
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    def lookuplinkrev_func(revlog):
        # Build a lookup mapping a node of *revlog* back to the changelog
        # node of the changeset that introduced it.
        def lookuplinkrev(n):
            return cl.node(revlog.linkrev(revlog.rev(n)))
        return lookuplinkrev

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files
        changedfiles = set()
        # collect() fills mmfs (manifest node -> linknode) and
        # changedfiles as a side effect while the changelog group below
        # is being generated.
        mmfs = {}
        collect = changegroup.collector(cl, mmfs, changedfiles)

        # 1) changelog chunks
        for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
            self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            yield chnk
        self.ui.progress(_('bundling changes'), None)

        # 2) manifest chunks
        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                               lookuplinkrev_func(mnfst))):
            self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            yield chnk
        self.ui.progress(_('bundling manifests'), None)

        # 3) one group per changed file, sorted by name; each group is
        # preceded by a chunk header carrying the filename.
        cnt = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodeiter = gennodelst(filerevlog)
            # Materialize so we can test emptiness before emitting the
            # filename header.
            nodeiter = list(nodeiter)
            if nodeiter:
                yield changegroup.chunkheader(len(fname))
                yield fname
                lookup = lookuplinkrev_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    self.ui.progress(
                        _('bundling files'), cnt, item=fname, unit=_('chunks'))
                    cnt += 1
                    yield chnk
        self.ui.progress(_('bundling files'), None)

        # Terminator: signal that no more groups follow.
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    # 'UN' = uncompressed bundle; compression, if any, is applied by the
    # caller/transport.
    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1634
1639
def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        # Changelog lookup used by cl.addgroup(): logs the incoming
        # changeset and returns the revision number it will receive.
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        # Map a changelog node to its revision number; used as the
        # linkrev lookup for manifest and filelog groups.
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    # Files touched by the incoming changesets (used for progress totals).
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
    try:
        # weakref proxy so the transaction can be garbage collected and
        # its destructor run even while revlogs hold a reference.
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            # Per-chunk progress callback handed to the bundle source;
            # step/count/total are mutated as we move between phases.
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = prog()
        source.callback = pr

        if (cl.addgroup(source, csmap, trp) is None
            and not emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(self[c].files())
        # From here on only the count is needed.
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(source, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = 'files'
        pr.count = 1
        pr.total = efiles
        # File groups carry their own structure; stop the per-chunk
        # callback and drive progress manually below.
        source.callback = None

        while 1:
            # An empty chunk terminates the sequence of file groups.
            f = source.chunk()
            if not f:
                break
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if fl.addgroup(source, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                # Tick off the filenodes we were required to receive.
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        # Anything still in needfiles must already exist locally,
        # otherwise the incoming data is incomplete.
        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        newheads = len(cl.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # 'pending' lets pretxnchangegroup hooks see the not-yet-
            # committed changelog data.
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()
    finally:
        tr.release()
        if lock:
            lock.release()

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug("updating the branch cache\n")
        self.updatebranchcache()
        self.hook("changegroup", node=hex(cl.node(clstart)),
                  source=srctype, url=url)

        # One 'incoming' hook invocation per added changeset.
        for i in xrange(clstart, clend):
            self.hook("incoming", node=hex(cl.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1796
1801
1797
1802
def stream_in(self, remote, requirements):
    """Clone by streaming raw store files from *remote*.

    Reads the stream_out wire protocol: a status code line, a
    "<files> <bytes>" header line, then for each file a
    "<name>\\0<size>" line followed by exactly <size> bytes of data.
    Returns len(self.heads()) + 1 so callers treat the result like a
    pull that added heads.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    # Status codes: 0 = OK, 1 = forbidden, 2 = remote lock failed.
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        # for backwards compat, name was partially encoded
        ofp = self.sopener(store.decodedir(name), 'w')
        # Copy exactly *size* bytes into the local store file.
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    # Guard against zero/negative elapsed time (clock granularity).
    if elapsed <= 0:
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))

    # new requirements = old non-format requirements + new format-related
    # requirements from the streamed-in repository
    requirements.update(set(self.requirements) - self.supportedformats)
    self._applyrequirements(requirements)
    self._writerequirements()

    # Drop cached state so the newly written store is re-read.
    self.invalidate()
    return len(self.heads()) + 1
1852
1857
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''
    # Fix: the default used to be a mutable list (heads=[]), which is
    # shared across calls. Use the None sentinel and normalize to []
    # so existing callers see identical behavior.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads:
        # 'stream' means remote revlog format is revlogv1 only
        if remote.capable('stream'):
            return self.stream_in(remote, set(('revlogv1',)))
        # otherwise, 'streamreqs' contains the remote revlog format
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            # if we support it, stream in and adjust our requirements
            if not streamreqs - self.supportedformats:
                return self.stream_in(remote, streamreqs)
    # Fall back to a regular pull (always taken when specific heads
    # were requested or streaming is unavailable/incompatible).
    return self.pull(remote, heads)
1880
1885
def pushkey(self, namespace, key, old, new):
    """Set *key* to *new* in *namespace*, asserting its prior value
    was *old*; delegates to the pushkey module."""
    result = pushkey.push(self, namespace, key, old, new)
    return result
1883
1888
def listkeys(self, namespace):
    """Return the key/value pairs stored under *namespace*;
    delegates to the pushkey module."""
    keys = pushkey.list(self, namespace)
    return keys
1886
1891
1887 # used to avoid circular references so destructors work
1892 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming each (src, dest) pair in *files*.

    The pairs are copied into tuples up front so the returned closure
    holds no reference back to the caller's objects.
    """
    pairs = [tuple(pair) for pair in files]
    def renameall():
        for source, destination in pairs:
            util.rename(source, destination)
    return renameall
1894
1899
def instance(ui, path, create):
    """Instantiate a localrepository for *path*, stripping any
    leading 'file:' scheme first."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
1897
1902
def islocal(path):
    """Repositories reachable through this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now