##// END OF EJS Templates
Changeset description: "dirstate.walk: push sorting up"
Author: Matt Mackall
Revision: r6827:c978d675 (branch: default)
Diff of context.py, lines 1-774 (unchanged hunk shown side by side)
@@ -1,774 +1,774
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, revlog, util, os, errno
10 import ancestor, bdiff, revlog, util, os, errno
11
11
class changectx(object):
    """A changecontext object makes access to data related to a particular
    changeset convenient."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag; '' is treated as
        '.', the working directory's first parent."""
        if changeid == '':
            changeid = '.'
        self._repo = repo
        self._node = self._repo.lookup(changeid)
        self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<changectx %s>" % str(self)

    def __hash__(self):
        # hash by revision number when known; contexts without a _rev
        # (e.g. some subclasses) fall back to identity
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equality is by revision number; anything without a _rev
        # attribute (including non-context objects) compares unequal
        try:
            return self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        return self._rev != nullrev

    def __getattr__(self, name):
        # compute and cache the expensive attributes lazily on first use
        if name == '_changeset':
            self._changeset = self._repo.changelog.read(self.node())
            return self._changeset
        elif name == '_manifest':
            self._manifest = self._repo.manifest.read(self._changeset[0])
            return self._manifest
        elif name == '_manifestdelta':
            md = self._repo.manifest.readdelta(self._changeset[0])
            self._manifestdelta = md
            return self._manifestdelta
        elif name == '_parents':
            p = self._repo.changelog.parents(self._node)
            if p[1] == nullid:
                # drop the null second parent
                p = p[:-1]
            self._parents = [changectx(self._repo, x) for x in p]
            return self._parents
        else:
            # call form works under both Python 2 and 3, unlike the
            # old "raise AttributeError, name" statement form
            raise AttributeError(name)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        # iterate the manifest's filenames in sorted order
        for f in util.sort(self._manifest):
            yield f

    def changeset(self): return self._changeset
    def manifest(self): return self._manifest

    def rev(self): return self._rev
    def node(self): return self._node
    def hex(self): return hex(self._node)
    def user(self): return self._changeset[1]
    def date(self): return self._changeset[2]
    def files(self): return self._changeset[3]
    def description(self): return self._changeset[4]
    def branch(self): return self._changeset[5].get("branch")
    def extra(self): return self._changeset[5]
    def tags(self): return self._repo.nodetags(self._node)

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def _fileinfo(self, path):
        """return (filenode, flags) for path; raise revlog.LookupError
        if the file is not present in this changeset's manifest"""
        if '_manifest' in self.__dict__:
            # a full manifest is already cached - use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise revlog.LookupError(self._node, path,
                                         _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # the (cheaper) manifest delta may already answer the query
            if path in self._manifestdelta:
                return self._manifestdelta[path], self._manifestdelta.flags(path)
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise revlog.LookupError(self._node, path,
                                     _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # a missing file simply has no flags
        try:
            return self._fileinfo(path)[1]
        except revlog.LookupError:
            return ''

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def filectxs(self):
        """generate a file context for each file in this changeset's
        manifest"""
        # BUGFIX: the original iterated over an undefined name 'mf';
        # bind the manifest locally before iterating
        mf = self.manifest()
        for f in util.sort(mf):
            yield self.filectx(f, fileid=mf[f])

    def ancestor(self, c2):
        """
        return the ancestor context of self and c2
        """
        n = self._repo.changelog.ancestor(self._node, c2._node)
        return changectx(self._repo, n)

    def walk(self, match):
        """yield the filenames in this changeset accepted by 'match';
        entries from match.files() never encountered are reported
        through match.bad()"""
        fdict = dict.fromkeys(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fdict.pop('.', None)
        for fn in self:
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    del fdict[ffn]
                    # must break right after del: fdict changed size
                    # while being iterated
                    break
            if match(fn):
                yield fn
        # whatever is left in fdict was never seen - report as bad
        for fn in util.sort(fdict):
            if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
                yield fn
165
165
class filectx(object):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None)

        if filelog:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    def __getattr__(self, name):
        # compute and cache derived attributes lazily on first use
        if name == '_changectx':
            self._changectx = changectx(self._repo, self._changeid)
            return self._changectx
        elif name == '_filelog':
            self._filelog = self._repo.file(self._path)
            return self._filelog
        elif name == '_changeid':
            if '_changectx' in self.__dict__:
                self._changeid = self._changectx.rev()
            else:
                self._changeid = self._filelog.linkrev(self._filenode)
            return self._changeid
        elif name == '_filenode':
            if '_fileid' in self.__dict__:
                self._filenode = self._filelog.lookup(self._fileid)
            else:
                self._filenode = self._changectx.filenode(self._path)
            return self._filenode
        elif name == '_filerev':
            self._filerev = self._filelog.rev(self._filenode)
            return self._filerev
        elif name == '_repopath':
            self._repopath = self._path
            return self._repopath
        else:
            # call form works under both Python 2 and 3, unlike the
            # old "raise AttributeError, name" statement form
            raise AttributeError(name)

    def __nonzero__(self):
        try:
            n = self._filenode
            return True
        except revlog.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __hash__(self):
        # hash by (path, fileid); fall back to identity when either
        # attribute has not been determined
        try:
            return hash((self._path, self._fileid))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (self._path == other._path
                    and self._fileid == other._fileid)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def filerev(self): return self._filerev
    def filenode(self): return self._filenode
    def flags(self): return self._changectx.flags(self._path)
    def filelog(self): return self._filelog

    def rev(self):
        """return the changeset revision this file context belongs to"""
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        if '_changeid' in self.__dict__:
            # NOTE(review): accessing _changectx here builds it from
            # _changeid via __getattr__; _changeid itself may be a node
            # or tag, so it cannot be returned directly
            return self._changectx.rev()
        return self._filelog.linkrev(self._filenode)

    def linkrev(self): return self._filelog.linkrev(self._filenode)
    def node(self): return self._changectx.node()
    def user(self): return self._changectx.user()
    def date(self): return self._changectx.date()
    def files(self): return self._changectx.files()
    def description(self): return self._changectx.description()
    def branch(self): return self._changectx.branch()
    def manifest(self): return self._changectx.manifest()
    def changectx(self): return self._changectx

    def data(self): return self._filelog.read(self._filenode)
    def path(self): return self._path
    def size(self): return self._filelog.size(self._filerev)

    def cmp(self, text): return self._filelog.cmp(self._filenode, text)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # a parent already has this exact file revision:
                    # not a rename in this changeset
                    return None
            except revlog.LookupError:
                pass
        return renamed

    def parents(self):
        """return file contexts for each parent file revision,
        following a rename for the first parent"""
        p = self._path
        fl = self._filelog
        pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]

        r = self._filelog.renamed(self._filenode)
        if r:
            # first parent comes from the rename source (filelog unknown)
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

    def annotate(self, follow=False, linenumber=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        def decorate_compat(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def without_linenumber(text, rev):
            return ([(rev, False)] * len(text.splitlines()), text)

        def with_linenumber(text, rev):
            size = len(text.splitlines())
            return ([(rev, i) for i in xrange(1, size + 1)], text)

        decorate = (((linenumber is None) and decorate_compat) or
                    (linenumber and with_linenumber) or
                    without_linenumber)

        def pair(parent, child):
            # lines unchanged between parent and child keep the
            # parent's annotation
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.cachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.cachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != nullrev]

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filerev())
        else:
            base = self

        # find all ancestors
        needed = {base: 1}
        visit = [base]
        files = [base._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        for f in files:
            fn = [(n.rev(), n) for n in needed if n._path == f]
            visit.extend(fn)

        hist = {}
        for r, f in util.sort(visit):
            curr = decorate(f.data(), f)
            for p in parents(f):
                # NOTE(review): p is a filectx, so 'p != nullid' is
                # always true; kept for fidelity with the original
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(1))

    def ancestor(self, fc2):
        """
        find the common ancestor file context, if any, of self, and fc2
        """

        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            # use 'is None' (identity), not '== None', for the
            # working-directory sentinel check
            if c._filerev is None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._repopath: self._filelog, fc2._repopath: fc2._filelog}
        def parents(vertex):
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            re = fl.renamed(n)
            if re:
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None
454
454
class workingctx(changectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    parents - a pair of parent nodeids, or None to use the dirstate.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, parents=None, text="", user=None, date=None,
                 extra=None, changes=None):
        # text is the pending commit message; parents/user/date/changes
        # left as None are computed lazily by __getattr__ from the repo
        self._repo = repo
        self._rev = None     # the working directory has no revision number
        self._node = None    # ... and no node id
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if parents:
            self._parents = [changectx(self._repo, p) for p in parents]
        if changes:
            self._status = list(changes)

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            branch = self._repo.dirstate.branch()
            try:
                # round-trip decode/encode only to validate that the
                # branch name is well-formed UTF-8
                branch = branch.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        # an empty branch name means the default branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'
492
492
493 def __str__(self):
493 def __str__(self):
494 return str(self._parents[0]) + "+"
494 return str(self._parents[0]) + "+"
495
495
496 def __nonzero__(self):
496 def __nonzero__(self):
497 return True
497 return True
498
498
499 def __contains__(self, key):
499 def __contains__(self, key):
500 return self._dirstate[f] not in "?r"
500 return self._dirstate[f] not in "?r"
501
501
502 def __getattr__(self, name):
502 def __getattr__(self, name):
503 if name == '_status':
503 if name == '_status':
504 self._status = self._repo.status(unknown=True)
504 self._status = self._repo.status(unknown=True)
505 return self._status
505 return self._status
506 elif name == '_user':
506 elif name == '_user':
507 self._user = self._repo.ui.username()
507 self._user = self._repo.ui.username()
508 return self._user
508 return self._user
509 elif name == '_date':
509 elif name == '_date':
510 self._date = util.makedate()
510 self._date = util.makedate()
511 return self._date
511 return self._date
512 if name == '_manifest':
512 if name == '_manifest':
513 self._buildmanifest()
513 self._buildmanifest()
514 return self._manifest
514 return self._manifest
515 elif name == '_parents':
515 elif name == '_parents':
516 p = self._repo.dirstate.parents()
516 p = self._repo.dirstate.parents()
517 if p[1] == nullid:
517 if p[1] == nullid:
518 p = p[:-1]
518 p = p[:-1]
519 self._parents = [changectx(self._repo, x) for x in p]
519 self._parents = [changectx(self._repo, x) for x in p]
520 return self._parents
520 return self._parents
521 else:
521 else:
522 raise AttributeError, name
522 raise AttributeError, name
523
523
    def _buildmanifest(self):
        """generate a manifest corresponding to the working directory"""

        # start from the first parent's manifest and overlay the
        # working-directory status on top of it
        man = self._parents[0].manifest().copy()
        copied = self._repo.dirstate.copies()
        # flag lookup falls back to a file's copy source when present
        cf = lambda x: man.flags(copied.get(x, x))
        ff = self._repo.dirstate.flagfunc(cf)
        modified, added, removed, deleted, unknown = self._status[:5]
        # tag each changed file: node (or nullid) plus a state letter
        for i, l in (("a", added), ("m", modified), ("u", unknown)):
            for f in l:
                man[f] = man.get(copied.get(f, f), nullid) + i
                try:
                    man.set(f, ff(f))
                except OSError:
                    # file may have vanished from disk; keep old flags
                    pass

        # deleted and removed files disappear from the manifest
        for f in deleted + removed:
            if f in man:
                del man[f]

        self._manifest = man
545
545
546 def manifest(self): return self._manifest
546 def manifest(self): return self._manifest
547
547
548 def user(self): return self._user or self._repo.ui.username()
548 def user(self): return self._user or self._repo.ui.username()
549 def date(self): return self._date
549 def date(self): return self._date
550 def description(self): return self._text
550 def description(self): return self._text
551 def files(self):
551 def files(self):
552 return util.sort(self._status[0] + self._status[1] + self._status[2])
552 return util.sort(self._status[0] + self._status[1] + self._status[2])
553
553
554 def modified(self): return self._status[0]
554 def modified(self): return self._status[0]
555 def added(self): return self._status[1]
555 def added(self): return self._status[1]
556 def removed(self): return self._status[2]
556 def removed(self): return self._status[2]
557 def deleted(self): return self._status[3]
557 def deleted(self): return self._status[3]
558 def unknown(self): return self._status[4]
558 def unknown(self): return self._status[4]
559 def clean(self): return self._status[5]
559 def clean(self): return self._status[5]
560 def branch(self): return self._extra['branch']
560 def branch(self): return self._extra['branch']
561 def extra(self): return self._extra
561 def extra(self): return self._extra
562
562
563 def tags(self):
563 def tags(self):
564 t = []
564 t = []
565 [t.extend(p.tags()) for p in self.parents()]
565 [t.extend(p.tags()) for p in self.parents()]
566 return t
566 return t
567
567
568 def children(self):
568 def children(self):
569 return []
569 return []
570
570
571 def flags(self, path):
571 def flags(self, path):
572 if '_manifest' in self.__dict__:
572 if '_manifest' in self.__dict__:
573 try:
573 try:
574 return self._manifest.flags(path)
574 return self._manifest.flags(path)
575 except KeyError:
575 except KeyError:
576 return ''
576 return ''
577
577
578 pnode = self._parents[0].changeset()[0]
578 pnode = self._parents[0].changeset()[0]
579 orig = self._repo.dirstate.copies().get(path, path)
579 orig = self._repo.dirstate.copies().get(path, path)
580 node, flag = self._repo.manifest.find(pnode, orig)
580 node, flag = self._repo.manifest.find(pnode, orig)
581 try:
581 try:
582 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
582 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
583 return ff(path)
583 return ff(path)
584 except OSError:
584 except OSError:
585 pass
585 pass
586
586
587 if not node or path in self.deleted() or path in self.removed():
587 if not node or path in self.deleted() or path in self.removed():
588 return ''
588 return ''
589 return flag
589 return flag
590
590
591 def filectx(self, path, filelog=None):
591 def filectx(self, path, filelog=None):
592 """get a file context from the working directory"""
592 """get a file context from the working directory"""
593 return workingfilectx(self._repo, path, workingctx=self,
593 return workingfilectx(self._repo, path, workingctx=self,
594 filelog=filelog)
594 filelog=filelog)
595
595
596 def ancestor(self, c2):
596 def ancestor(self, c2):
597 """return the ancestor context of self and c2"""
597 """return the ancestor context of self and c2"""
598 return self._parents[0].ancestor(c2) # punt on two parents for now
598 return self._parents[0].ancestor(c2) # punt on two parents for now
599
599
600 def walk(self, match):
600 def walk(self, match):
601 for fn, st in self._repo.dirstate.walk(match, True, False):
601 for fn, st in util.sort(self._repo.dirstate.walk(match, True, False)):
602 yield fn
602 yield fn
603
603
604 class workingfilectx(filectx):
604 class workingfilectx(filectx):
605 """A workingfilectx object makes access to data related to a particular
605 """A workingfilectx object makes access to data related to a particular
606 file in the working directory convenient."""
606 file in the working directory convenient."""
607 def __init__(self, repo, path, filelog=None, workingctx=None):
607 def __init__(self, repo, path, filelog=None, workingctx=None):
608 """changeid can be a changeset revision, node, or tag.
608 """changeid can be a changeset revision, node, or tag.
609 fileid can be a file revision or node."""
609 fileid can be a file revision or node."""
610 self._repo = repo
610 self._repo = repo
611 self._path = path
611 self._path = path
612 self._changeid = None
612 self._changeid = None
613 self._filerev = self._filenode = None
613 self._filerev = self._filenode = None
614
614
615 if filelog:
615 if filelog:
616 self._filelog = filelog
616 self._filelog = filelog
617 if workingctx:
617 if workingctx:
618 self._changectx = workingctx
618 self._changectx = workingctx
619
619
620 def __getattr__(self, name):
620 def __getattr__(self, name):
621 if name == '_changectx':
621 if name == '_changectx':
622 self._changectx = workingctx(self._repo)
622 self._changectx = workingctx(self._repo)
623 return self._changectx
623 return self._changectx
624 elif name == '_repopath':
624 elif name == '_repopath':
625 self._repopath = (self._repo.dirstate.copied(self._path)
625 self._repopath = (self._repo.dirstate.copied(self._path)
626 or self._path)
626 or self._path)
627 return self._repopath
627 return self._repopath
628 elif name == '_filelog':
628 elif name == '_filelog':
629 self._filelog = self._repo.file(self._repopath)
629 self._filelog = self._repo.file(self._repopath)
630 return self._filelog
630 return self._filelog
631 else:
631 else:
632 raise AttributeError, name
632 raise AttributeError, name
633
633
634 def __nonzero__(self):
634 def __nonzero__(self):
635 return True
635 return True
636
636
637 def __str__(self):
637 def __str__(self):
638 return "%s@%s" % (self.path(), self._changectx)
638 return "%s@%s" % (self.path(), self._changectx)
639
639
640 def filectx(self, fileid):
640 def filectx(self, fileid):
641 '''opens an arbitrary revision of the file without
641 '''opens an arbitrary revision of the file without
642 opening a new filelog'''
642 opening a new filelog'''
643 return filectx(self._repo, self._repopath, fileid=fileid,
643 return filectx(self._repo, self._repopath, fileid=fileid,
644 filelog=self._filelog)
644 filelog=self._filelog)
645
645
646 def rev(self):
646 def rev(self):
647 if '_changectx' in self.__dict__:
647 if '_changectx' in self.__dict__:
648 return self._changectx.rev()
648 return self._changectx.rev()
649 return self._filelog.linkrev(self._filenode)
649 return self._filelog.linkrev(self._filenode)
650
650
651 def data(self): return self._repo.wread(self._path)
651 def data(self): return self._repo.wread(self._path)
652 def renamed(self):
652 def renamed(self):
653 rp = self._repopath
653 rp = self._repopath
654 if rp == self._path:
654 if rp == self._path:
655 return None
655 return None
656 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
656 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
657
657
658 def parents(self):
658 def parents(self):
659 '''return parent filectxs, following copies if necessary'''
659 '''return parent filectxs, following copies if necessary'''
660 p = self._path
660 p = self._path
661 rp = self._repopath
661 rp = self._repopath
662 pcl = self._changectx._parents
662 pcl = self._changectx._parents
663 fl = self._filelog
663 fl = self._filelog
664 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
664 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
665 if len(pcl) > 1:
665 if len(pcl) > 1:
666 if rp != p:
666 if rp != p:
667 fl = None
667 fl = None
668 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
668 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
669
669
670 return [filectx(self._repo, p, fileid=n, filelog=l)
670 return [filectx(self._repo, p, fileid=n, filelog=l)
671 for p,n,l in pl if n != nullid]
671 for p,n,l in pl if n != nullid]
672
672
673 def children(self):
673 def children(self):
674 return []
674 return []
675
675
676 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
676 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
677 def date(self):
677 def date(self):
678 t, tz = self._changectx.date()
678 t, tz = self._changectx.date()
679 try:
679 try:
680 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
680 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
681 except OSError, err:
681 except OSError, err:
682 if err.errno != errno.ENOENT: raise
682 if err.errno != errno.ENOENT: raise
683 return (t, tz)
683 return (t, tz)
684
684
685 def cmp(self, text): return self._repo.wread(self._path) == text
685 def cmp(self, text): return self._repo.wread(self._path) == text
686
686
687 class memctx(object):
687 class memctx(object):
688 """A memctx is a subset of changectx supposed to be built on memory
688 """A memctx is a subset of changectx supposed to be built on memory
689 and passed to commit functions.
689 and passed to commit functions.
690
690
691 NOTE: this interface and the related memfilectx are experimental and
691 NOTE: this interface and the related memfilectx are experimental and
692 may change without notice.
692 may change without notice.
693
693
694 parents - a pair of parent nodeids.
694 parents - a pair of parent nodeids.
695 filectxfn - a callable taking (repo, memctx, path) arguments and
695 filectxfn - a callable taking (repo, memctx, path) arguments and
696 returning a memctx object.
696 returning a memctx object.
697 date - any valid date string or (unixtime, offset), or None.
697 date - any valid date string or (unixtime, offset), or None.
698 user - username string, or None.
698 user - username string, or None.
699 extra - a dictionary of extra values, or None.
699 extra - a dictionary of extra values, or None.
700 """
700 """
701 def __init__(self, repo, parents, text, files, filectxfn, user=None,
701 def __init__(self, repo, parents, text, files, filectxfn, user=None,
702 date=None, extra=None):
702 date=None, extra=None):
703 self._repo = repo
703 self._repo = repo
704 self._rev = None
704 self._rev = None
705 self._node = None
705 self._node = None
706 self._text = text
706 self._text = text
707 self._date = date and util.parsedate(date) or util.makedate()
707 self._date = date and util.parsedate(date) or util.makedate()
708 self._user = user
708 self._user = user
709 parents = [(p or nullid) for p in parents]
709 parents = [(p or nullid) for p in parents]
710 p1, p2 = parents
710 p1, p2 = parents
711 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
711 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
712 files = util.sort(list(files))
712 files = util.sort(list(files))
713 self._status = [files, [], [], [], []]
713 self._status = [files, [], [], [], []]
714 self._filectxfn = filectxfn
714 self._filectxfn = filectxfn
715
715
716 self._extra = extra and extra.copy() or {}
716 self._extra = extra and extra.copy() or {}
717 if 'branch' not in self._extra:
717 if 'branch' not in self._extra:
718 self._extra['branch'] = 'default'
718 self._extra['branch'] = 'default'
719 elif self._extra.get('branch') == '':
719 elif self._extra.get('branch') == '':
720 self._extra['branch'] = 'default'
720 self._extra['branch'] = 'default'
721
721
722 def __str__(self):
722 def __str__(self):
723 return str(self._parents[0]) + "+"
723 return str(self._parents[0]) + "+"
724
724
725 def __int__(self):
725 def __int__(self):
726 return self._rev
726 return self._rev
727
727
728 def __nonzero__(self):
728 def __nonzero__(self):
729 return True
729 return True
730
730
731 def user(self): return self._user or self._repo.ui.username()
731 def user(self): return self._user or self._repo.ui.username()
732 def date(self): return self._date
732 def date(self): return self._date
733 def description(self): return self._text
733 def description(self): return self._text
734 def files(self): return self.modified()
734 def files(self): return self.modified()
735 def modified(self): return self._status[0]
735 def modified(self): return self._status[0]
736 def added(self): return self._status[1]
736 def added(self): return self._status[1]
737 def removed(self): return self._status[2]
737 def removed(self): return self._status[2]
738 def deleted(self): return self._status[3]
738 def deleted(self): return self._status[3]
739 def unknown(self): return self._status[4]
739 def unknown(self): return self._status[4]
740 def clean(self): return self._status[5]
740 def clean(self): return self._status[5]
741 def branch(self): return self._extra['branch']
741 def branch(self): return self._extra['branch']
742 def extra(self): return self._extra
742 def extra(self): return self._extra
743 def flags(self, f): return self[f].flags()
743 def flags(self, f): return self[f].flags()
744
744
745 def parents(self):
745 def parents(self):
746 """return contexts for each parent changeset"""
746 """return contexts for each parent changeset"""
747 return self._parents
747 return self._parents
748
748
749 def filectx(self, path, filelog=None):
749 def filectx(self, path, filelog=None):
750 """get a file context from the working directory"""
750 """get a file context from the working directory"""
751 return self._filectxfn(self._repo, self, path)
751 return self._filectxfn(self._repo, self, path)
752
752
753 class memfilectx(object):
753 class memfilectx(object):
754 """A memfilectx is a subset of filectx supposed to be built by client
754 """A memfilectx is a subset of filectx supposed to be built by client
755 code and passed to commit functions.
755 code and passed to commit functions.
756 """
756 """
757 def __init__(self, path, data, islink, isexec, copied):
757 def __init__(self, path, data, islink, isexec, copied):
758 """copied is the source file path, or None."""
758 """copied is the source file path, or None."""
759 self._path = path
759 self._path = path
760 self._data = data
760 self._data = data
761 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
761 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
762 self._copied = None
762 self._copied = None
763 if copied:
763 if copied:
764 self._copied = (copied, nullid)
764 self._copied = (copied, nullid)
765
765
766 def __nonzero__(self): return True
766 def __nonzero__(self): return True
767 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
767 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
768 def path(self): return self._path
768 def path(self): return self._path
769 def data(self): return self._data
769 def data(self): return self._data
770 def flags(self): return self._flags
770 def flags(self): return self._flags
771 def isexec(self): return 'x' in self._flags
771 def isexec(self): return 'x' in self._flags
772 def islink(self): return 'l' in self._flags
772 def islink(self): return 'l' in self._flags
773 def renamed(self): return self._copied
773 def renamed(self): return self._copied
774
774
@@ -1,609 +1,605
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import nullid
10 from node import nullid
11 from i18n import _
11 from i18n import _
12 import struct, os, bisect, stat, util, errno, ignore
12 import struct, os, bisect, stat, util, errno, ignore
13 import cStringIO, osutil, sys
13 import cStringIO, osutil, sys
14
14
15 _unknown = ('?', 0, 0, 0)
15 _unknown = ('?', 0, 0, 0)
16 _format = ">cllll"
16 _format = ">cllll"
17
17
18 def _finddirs(path):
18 def _finddirs(path):
19 pos = len(path)
19 pos = len(path)
20 while 1:
20 while 1:
21 pos = path.rfind('/', 0, pos)
21 pos = path.rfind('/', 0, pos)
22 if pos == -1:
22 if pos == -1:
23 break
23 break
24 yield path[:pos]
24 yield path[:pos]
25
25
26 class dirstate(object):
26 class dirstate(object):
27
27
28 def __init__(self, opener, ui, root):
28 def __init__(self, opener, ui, root):
29 self._opener = opener
29 self._opener = opener
30 self._root = root
30 self._root = root
31 self._dirty = False
31 self._dirty = False
32 self._dirtypl = False
32 self._dirtypl = False
33 self._ui = ui
33 self._ui = ui
34
34
35 def __getattr__(self, name):
35 def __getattr__(self, name):
36 if name == '_map':
36 if name == '_map':
37 self._read()
37 self._read()
38 return self._map
38 return self._map
39 elif name == '_copymap':
39 elif name == '_copymap':
40 self._read()
40 self._read()
41 return self._copymap
41 return self._copymap
42 elif name == '_foldmap':
42 elif name == '_foldmap':
43 _foldmap = {}
43 _foldmap = {}
44 for name in self._map:
44 for name in self._map:
45 norm = os.path.normcase(os.path.normpath(name))
45 norm = os.path.normcase(os.path.normpath(name))
46 _foldmap[norm] = name
46 _foldmap[norm] = name
47 self._foldmap = _foldmap
47 self._foldmap = _foldmap
48 return self._foldmap
48 return self._foldmap
49 elif name == '_branch':
49 elif name == '_branch':
50 try:
50 try:
51 self._branch = (self._opener("branch").read().strip()
51 self._branch = (self._opener("branch").read().strip()
52 or "default")
52 or "default")
53 except IOError:
53 except IOError:
54 self._branch = "default"
54 self._branch = "default"
55 return self._branch
55 return self._branch
56 elif name == '_pl':
56 elif name == '_pl':
57 self._pl = [nullid, nullid]
57 self._pl = [nullid, nullid]
58 try:
58 try:
59 st = self._opener("dirstate").read(40)
59 st = self._opener("dirstate").read(40)
60 if len(st) == 40:
60 if len(st) == 40:
61 self._pl = st[:20], st[20:40]
61 self._pl = st[:20], st[20:40]
62 except IOError, err:
62 except IOError, err:
63 if err.errno != errno.ENOENT: raise
63 if err.errno != errno.ENOENT: raise
64 return self._pl
64 return self._pl
65 elif name == '_dirs':
65 elif name == '_dirs':
66 dirs = {}
66 dirs = {}
67 for f,s in self._map.items():
67 for f,s in self._map.items():
68 if s[0] != 'r':
68 if s[0] != 'r':
69 for base in _finddirs(f):
69 for base in _finddirs(f):
70 dirs[base] = dirs.get(base, 0) + 1
70 dirs[base] = dirs.get(base, 0) + 1
71 self._dirs = dirs
71 self._dirs = dirs
72 return self._dirs
72 return self._dirs
73 elif name == '_ignore':
73 elif name == '_ignore':
74 files = [self._join('.hgignore')]
74 files = [self._join('.hgignore')]
75 for name, path in self._ui.configitems("ui"):
75 for name, path in self._ui.configitems("ui"):
76 if name == 'ignore' or name.startswith('ignore.'):
76 if name == 'ignore' or name.startswith('ignore.'):
77 files.append(os.path.expanduser(path))
77 files.append(os.path.expanduser(path))
78 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
78 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
79 return self._ignore
79 return self._ignore
80 elif name == '_slash':
80 elif name == '_slash':
81 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
81 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
82 return self._slash
82 return self._slash
83 elif name == '_checklink':
83 elif name == '_checklink':
84 self._checklink = util.checklink(self._root)
84 self._checklink = util.checklink(self._root)
85 return self._checklink
85 return self._checklink
86 elif name == '_checkexec':
86 elif name == '_checkexec':
87 self._checkexec = util.checkexec(self._root)
87 self._checkexec = util.checkexec(self._root)
88 return self._checkexec
88 return self._checkexec
89 elif name == '_checkcase':
89 elif name == '_checkcase':
90 self._checkcase = not util.checkcase(self._join('.hg'))
90 self._checkcase = not util.checkcase(self._join('.hg'))
91 return self._checkcase
91 return self._checkcase
92 elif name == 'normalize':
92 elif name == 'normalize':
93 if self._checkcase:
93 if self._checkcase:
94 self.normalize = self._normalize
94 self.normalize = self._normalize
95 else:
95 else:
96 self.normalize = lambda x: x
96 self.normalize = lambda x: x
97 return self.normalize
97 return self.normalize
98 else:
98 else:
99 raise AttributeError, name
99 raise AttributeError, name
100
100
101 def _join(self, f):
101 def _join(self, f):
102 return os.path.join(self._root, f)
102 return os.path.join(self._root, f)
103
103
104 def flagfunc(self, fallback):
104 def flagfunc(self, fallback):
105 if self._checklink:
105 if self._checklink:
106 if self._checkexec:
106 if self._checkexec:
107 def f(x):
107 def f(x):
108 p = os.path.join(self._root, x)
108 p = os.path.join(self._root, x)
109 if os.path.islink(p):
109 if os.path.islink(p):
110 return 'l'
110 return 'l'
111 if util.is_exec(p):
111 if util.is_exec(p):
112 return 'x'
112 return 'x'
113 return ''
113 return ''
114 return f
114 return f
115 def f(x):
115 def f(x):
116 if os.path.islink(os.path.join(self._root, x)):
116 if os.path.islink(os.path.join(self._root, x)):
117 return 'l'
117 return 'l'
118 if 'x' in fallback(x):
118 if 'x' in fallback(x):
119 return 'x'
119 return 'x'
120 return ''
120 return ''
121 return f
121 return f
122 if self._checkexec:
122 if self._checkexec:
123 def f(x):
123 def f(x):
124 if 'l' in fallback(x):
124 if 'l' in fallback(x):
125 return 'l'
125 return 'l'
126 if util.is_exec(os.path.join(self._root, x)):
126 if util.is_exec(os.path.join(self._root, x)):
127 return 'x'
127 return 'x'
128 return ''
128 return ''
129 return f
129 return f
130 return fallback
130 return fallback
131
131
132 def getcwd(self):
132 def getcwd(self):
133 cwd = os.getcwd()
133 cwd = os.getcwd()
134 if cwd == self._root: return ''
134 if cwd == self._root: return ''
135 # self._root ends with a path separator if self._root is '/' or 'C:\'
135 # self._root ends with a path separator if self._root is '/' or 'C:\'
136 rootsep = self._root
136 rootsep = self._root
137 if not util.endswithsep(rootsep):
137 if not util.endswithsep(rootsep):
138 rootsep += os.sep
138 rootsep += os.sep
139 if cwd.startswith(rootsep):
139 if cwd.startswith(rootsep):
140 return cwd[len(rootsep):]
140 return cwd[len(rootsep):]
141 else:
141 else:
142 # we're outside the repo. return an absolute path.
142 # we're outside the repo. return an absolute path.
143 return cwd
143 return cwd
144
144
145 def pathto(self, f, cwd=None):
145 def pathto(self, f, cwd=None):
146 if cwd is None:
146 if cwd is None:
147 cwd = self.getcwd()
147 cwd = self.getcwd()
148 path = util.pathto(self._root, cwd, f)
148 path = util.pathto(self._root, cwd, f)
149 if self._slash:
149 if self._slash:
150 return util.normpath(path)
150 return util.normpath(path)
151 return path
151 return path
152
152
153 def __getitem__(self, key):
153 def __getitem__(self, key):
154 ''' current states:
154 ''' current states:
155 n normal
155 n normal
156 m needs merging
156 m needs merging
157 r marked for removal
157 r marked for removal
158 a marked for addition
158 a marked for addition
159 ? not tracked'''
159 ? not tracked'''
160 return self._map.get(key, ("?",))[0]
160 return self._map.get(key, ("?",))[0]
161
161
162 def __contains__(self, key):
162 def __contains__(self, key):
163 return key in self._map
163 return key in self._map
164
164
165 def __iter__(self):
165 def __iter__(self):
166 for x in util.sort(self._map):
166 for x in util.sort(self._map):
167 yield x
167 yield x
168
168
169 def parents(self):
169 def parents(self):
170 return self._pl
170 return self._pl
171
171
172 def branch(self):
172 def branch(self):
173 return self._branch
173 return self._branch
174
174
175 def setparents(self, p1, p2=nullid):
175 def setparents(self, p1, p2=nullid):
176 self._dirty = self._dirtypl = True
176 self._dirty = self._dirtypl = True
177 self._pl = p1, p2
177 self._pl = p1, p2
178
178
179 def setbranch(self, branch):
179 def setbranch(self, branch):
180 self._branch = branch
180 self._branch = branch
181 self._opener("branch", "w").write(branch + '\n')
181 self._opener("branch", "w").write(branch + '\n')
182
182
183 def _read(self):
183 def _read(self):
184 self._map = {}
184 self._map = {}
185 self._copymap = {}
185 self._copymap = {}
186 if not self._dirtypl:
186 if not self._dirtypl:
187 self._pl = [nullid, nullid]
187 self._pl = [nullid, nullid]
188 try:
188 try:
189 st = self._opener("dirstate").read()
189 st = self._opener("dirstate").read()
190 except IOError, err:
190 except IOError, err:
191 if err.errno != errno.ENOENT: raise
191 if err.errno != errno.ENOENT: raise
192 return
192 return
193 if not st:
193 if not st:
194 return
194 return
195
195
196 if not self._dirtypl:
196 if not self._dirtypl:
197 self._pl = [st[:20], st[20: 40]]
197 self._pl = [st[:20], st[20: 40]]
198
198
199 # deref fields so they will be local in loop
199 # deref fields so they will be local in loop
200 dmap = self._map
200 dmap = self._map
201 copymap = self._copymap
201 copymap = self._copymap
202 unpack = struct.unpack
202 unpack = struct.unpack
203 e_size = struct.calcsize(_format)
203 e_size = struct.calcsize(_format)
204 pos1 = 40
204 pos1 = 40
205 l = len(st)
205 l = len(st)
206
206
207 # the inner loop
207 # the inner loop
208 while pos1 < l:
208 while pos1 < l:
209 pos2 = pos1 + e_size
209 pos2 = pos1 + e_size
210 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
210 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
211 pos1 = pos2 + e[4]
211 pos1 = pos2 + e[4]
212 f = st[pos2:pos1]
212 f = st[pos2:pos1]
213 if '\0' in f:
213 if '\0' in f:
214 f, c = f.split('\0')
214 f, c = f.split('\0')
215 copymap[f] = c
215 copymap[f] = c
216 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
216 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
217
217
218 def invalidate(self):
218 def invalidate(self):
219 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
219 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
220 if a in self.__dict__:
220 if a in self.__dict__:
221 delattr(self, a)
221 delattr(self, a)
222 self._dirty = False
222 self._dirty = False
223
223
224 def copy(self, source, dest):
224 def copy(self, source, dest):
225 if source == dest:
225 if source == dest:
226 return
226 return
227 self._dirty = True
227 self._dirty = True
228 self._copymap[dest] = source
228 self._copymap[dest] = source
229
229
230 def copied(self, file):
230 def copied(self, file):
231 return self._copymap.get(file, None)
231 return self._copymap.get(file, None)
232
232
233 def copies(self):
233 def copies(self):
234 return self._copymap
234 return self._copymap
235
235
236 def _droppath(self, f):
236 def _droppath(self, f):
237 if self[f] not in "?r" and "_dirs" in self.__dict__:
237 if self[f] not in "?r" and "_dirs" in self.__dict__:
238 dirs = self._dirs
238 dirs = self._dirs
239 for base in _finddirs(f):
239 for base in _finddirs(f):
240 if dirs[base] == 1:
240 if dirs[base] == 1:
241 del dirs[base]
241 del dirs[base]
242 else:
242 else:
243 dirs[base] -= 1
243 dirs[base] -= 1
244
244
245 def _addpath(self, f, check=False):
245 def _addpath(self, f, check=False):
246 oldstate = self[f]
246 oldstate = self[f]
247 if check or oldstate == "r":
247 if check or oldstate == "r":
248 if '\r' in f or '\n' in f:
248 if '\r' in f or '\n' in f:
249 raise util.Abort(
249 raise util.Abort(
250 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
250 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
251 if f in self._dirs:
251 if f in self._dirs:
252 raise util.Abort(_('directory %r already in dirstate') % f)
252 raise util.Abort(_('directory %r already in dirstate') % f)
253 # shadows
253 # shadows
254 for d in _finddirs(f):
254 for d in _finddirs(f):
255 if d in self._dirs:
255 if d in self._dirs:
256 break
256 break
257 if d in self._map and self[d] != 'r':
257 if d in self._map and self[d] != 'r':
258 raise util.Abort(
258 raise util.Abort(
259 _('file %r in dirstate clashes with %r') % (d, f))
259 _('file %r in dirstate clashes with %r') % (d, f))
260 if oldstate in "?r" and "_dirs" in self.__dict__:
260 if oldstate in "?r" and "_dirs" in self.__dict__:
261 dirs = self._dirs
261 dirs = self._dirs
262 for base in _finddirs(f):
262 for base in _finddirs(f):
263 dirs[base] = dirs.get(base, 0) + 1
263 dirs[base] = dirs.get(base, 0) + 1
264
264
265 def normal(self, f):
265 def normal(self, f):
266 'mark a file normal and clean'
266 'mark a file normal and clean'
267 self._dirty = True
267 self._dirty = True
268 self._addpath(f)
268 self._addpath(f)
269 s = os.lstat(self._join(f))
269 s = os.lstat(self._join(f))
270 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
270 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
271 if f in self._copymap:
271 if f in self._copymap:
272 del self._copymap[f]
272 del self._copymap[f]
273
273
274 def normallookup(self, f):
274 def normallookup(self, f):
275 'mark a file normal, but possibly dirty'
275 'mark a file normal, but possibly dirty'
276 if self._pl[1] != nullid and f in self._map:
276 if self._pl[1] != nullid and f in self._map:
277 # if there is a merge going on and the file was either
277 # if there is a merge going on and the file was either
278 # in state 'm' or dirty before being removed, restore that state.
278 # in state 'm' or dirty before being removed, restore that state.
279 entry = self._map[f]
279 entry = self._map[f]
280 if entry[0] == 'r' and entry[2] in (-1, -2):
280 if entry[0] == 'r' and entry[2] in (-1, -2):
281 source = self._copymap.get(f)
281 source = self._copymap.get(f)
282 if entry[2] == -1:
282 if entry[2] == -1:
283 self.merge(f)
283 self.merge(f)
284 elif entry[2] == -2:
284 elif entry[2] == -2:
285 self.normaldirty(f)
285 self.normaldirty(f)
286 if source:
286 if source:
287 self.copy(source, f)
287 self.copy(source, f)
288 return
288 return
289 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
289 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
290 return
290 return
291 self._dirty = True
291 self._dirty = True
292 self._addpath(f)
292 self._addpath(f)
293 self._map[f] = ('n', 0, -1, -1, 0)
293 self._map[f] = ('n', 0, -1, -1, 0)
294 if f in self._copymap:
294 if f in self._copymap:
295 del self._copymap[f]
295 del self._copymap[f]
296
296
297 def normaldirty(self, f):
297 def normaldirty(self, f):
298 'mark a file normal, but dirty'
298 'mark a file normal, but dirty'
299 self._dirty = True
299 self._dirty = True
300 self._addpath(f)
300 self._addpath(f)
301 self._map[f] = ('n', 0, -2, -1, 0)
301 self._map[f] = ('n', 0, -2, -1, 0)
302 if f in self._copymap:
302 if f in self._copymap:
303 del self._copymap[f]
303 del self._copymap[f]
304
304
305 def add(self, f):
305 def add(self, f):
306 'mark a file added'
306 'mark a file added'
307 self._dirty = True
307 self._dirty = True
308 self._addpath(f, True)
308 self._addpath(f, True)
309 self._map[f] = ('a', 0, -1, -1, 0)
309 self._map[f] = ('a', 0, -1, -1, 0)
310 if f in self._copymap:
310 if f in self._copymap:
311 del self._copymap[f]
311 del self._copymap[f]
312
312
313 def remove(self, f):
313 def remove(self, f):
314 'mark a file removed'
314 'mark a file removed'
315 self._dirty = True
315 self._dirty = True
316 self._droppath(f)
316 self._droppath(f)
317 size = 0
317 size = 0
318 if self._pl[1] != nullid and f in self._map:
318 if self._pl[1] != nullid and f in self._map:
319 entry = self._map[f]
319 entry = self._map[f]
320 if entry[0] == 'm':
320 if entry[0] == 'm':
321 size = -1
321 size = -1
322 elif entry[0] == 'n' and entry[2] == -2:
322 elif entry[0] == 'n' and entry[2] == -2:
323 size = -2
323 size = -2
324 self._map[f] = ('r', 0, size, 0, 0)
324 self._map[f] = ('r', 0, size, 0, 0)
325 if size == 0 and f in self._copymap:
325 if size == 0 and f in self._copymap:
326 del self._copymap[f]
326 del self._copymap[f]
327
327
328 def merge(self, f):
328 def merge(self, f):
329 'mark a file merged'
329 'mark a file merged'
330 self._dirty = True
330 self._dirty = True
331 s = os.lstat(self._join(f))
331 s = os.lstat(self._join(f))
332 self._addpath(f)
332 self._addpath(f)
333 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
333 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
334 if f in self._copymap:
334 if f in self._copymap:
335 del self._copymap[f]
335 del self._copymap[f]
336
336
337 def forget(self, f):
337 def forget(self, f):
338 'forget a file'
338 'forget a file'
339 self._dirty = True
339 self._dirty = True
340 try:
340 try:
341 self._droppath(f)
341 self._droppath(f)
342 del self._map[f]
342 del self._map[f]
343 except KeyError:
343 except KeyError:
344 self._ui.warn(_("not in dirstate: %s\n") % f)
344 self._ui.warn(_("not in dirstate: %s\n") % f)
345
345
346 def _normalize(self, path):
346 def _normalize(self, path):
347 if path not in self._foldmap:
347 if path not in self._foldmap:
348 if not os.path.exists(path):
348 if not os.path.exists(path):
349 return path
349 return path
350 self._foldmap[path] = util.fspath(path, self._root)
350 self._foldmap[path] = util.fspath(path, self._root)
351 return self._foldmap[path]
351 return self._foldmap[path]
352
352
353 def clear(self):
353 def clear(self):
354 self._map = {}
354 self._map = {}
355 if "_dirs" in self.__dict__:
355 if "_dirs" in self.__dict__:
356 delattr(self, "_dirs");
356 delattr(self, "_dirs");
357 self._copymap = {}
357 self._copymap = {}
358 self._pl = [nullid, nullid]
358 self._pl = [nullid, nullid]
359 self._dirty = True
359 self._dirty = True
360
360
361 def rebuild(self, parent, files):
361 def rebuild(self, parent, files):
362 self.clear()
362 self.clear()
363 for f in files:
363 for f in files:
364 if 'x' in files.flags(f):
364 if 'x' in files.flags(f):
365 self._map[f] = ('n', 0777, -1, 0, 0)
365 self._map[f] = ('n', 0777, -1, 0, 0)
366 else:
366 else:
367 self._map[f] = ('n', 0666, -1, 0, 0)
367 self._map[f] = ('n', 0666, -1, 0, 0)
368 self._pl = (parent, nullid)
368 self._pl = (parent, nullid)
369 self._dirty = True
369 self._dirty = True
370
370
371 def write(self):
371 def write(self):
372 if not self._dirty:
372 if not self._dirty:
373 return
373 return
374 st = self._opener("dirstate", "w", atomictemp=True)
374 st = self._opener("dirstate", "w", atomictemp=True)
375
375
376 try:
376 try:
377 gran = int(self._ui.config('dirstate', 'granularity', 1))
377 gran = int(self._ui.config('dirstate', 'granularity', 1))
378 except ValueError:
378 except ValueError:
379 gran = 1
379 gran = 1
380 limit = sys.maxint
380 limit = sys.maxint
381 if gran > 0:
381 if gran > 0:
382 limit = util.fstat(st).st_mtime - gran
382 limit = util.fstat(st).st_mtime - gran
383
383
384 cs = cStringIO.StringIO()
384 cs = cStringIO.StringIO()
385 copymap = self._copymap
385 copymap = self._copymap
386 pack = struct.pack
386 pack = struct.pack
387 write = cs.write
387 write = cs.write
388 write("".join(self._pl))
388 write("".join(self._pl))
389 for f, e in self._map.iteritems():
389 for f, e in self._map.iteritems():
390 if f in copymap:
390 if f in copymap:
391 f = "%s\0%s" % (f, copymap[f])
391 f = "%s\0%s" % (f, copymap[f])
392 if e[3] > limit and e[0] == 'n':
392 if e[3] > limit and e[0] == 'n':
393 e = (e[0], 0, -1, -1, 0)
393 e = (e[0], 0, -1, -1, 0)
394 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
394 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
395 write(e)
395 write(e)
396 write(f)
396 write(f)
397 st.write(cs.getvalue())
397 st.write(cs.getvalue())
398 st.rename()
398 st.rename()
399 self._dirty = self._dirtypl = False
399 self._dirty = self._dirtypl = False
400
400
401 def _supported(self, f, mode, verbose=False):
401 def _supported(self, f, mode, verbose=False):
402 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
402 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
403 return True
403 return True
404 if verbose:
404 if verbose:
405 kind = 'unknown'
405 kind = 'unknown'
406 if stat.S_ISCHR(mode): kind = _('character device')
406 if stat.S_ISCHR(mode): kind = _('character device')
407 elif stat.S_ISBLK(mode): kind = _('block device')
407 elif stat.S_ISBLK(mode): kind = _('block device')
408 elif stat.S_ISFIFO(mode): kind = _('fifo')
408 elif stat.S_ISFIFO(mode): kind = _('fifo')
409 elif stat.S_ISSOCK(mode): kind = _('socket')
409 elif stat.S_ISSOCK(mode): kind = _('socket')
410 elif stat.S_ISDIR(mode): kind = _('directory')
410 elif stat.S_ISDIR(mode): kind = _('directory')
411 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
411 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
412 % (self.pathto(f), kind))
412 % (self.pathto(f), kind))
413 return False
413 return False
414
414
415 def _dirignore(self, f):
415 def _dirignore(self, f):
416 if f == '.':
416 if f == '.':
417 return False
417 return False
418 if self._ignore(f):
418 if self._ignore(f):
419 return True
419 return True
420 for p in _finddirs(f):
420 for p in _finddirs(f):
421 if self._ignore(p):
421 if self._ignore(p):
422 return True
422 return True
423 return False
423 return False
424
424
425 def walk(self, match, unknown, ignored):
425 def walk(self, match, unknown, ignored):
426 '''
426 '''
427 walk recursively through the directory tree, finding all files
427 walk recursively through the directory tree, finding all files
428 matched by the match function
428 matched by the match function
429
429
430 results are yielded in a tuple (filename, stat), where stat
430 results are yielded in a tuple (filename, stat), where stat
431 and st is the stat result if the file was found in the directory.
431 and st is the stat result if the file was found in the directory.
432 '''
432 '''
433
433
434 def fwarn(f, msg):
434 def fwarn(f, msg):
435 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
435 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
436 return False
436 return False
437 badfn = fwarn
437 badfn = fwarn
438 if hasattr(match, 'bad'):
438 if hasattr(match, 'bad'):
439 badfn = match.bad
439 badfn = match.bad
440
440
441 files = util.unique(match.files())
441 files = util.unique(match.files())
442 if not files or '.' in files:
442 if not files or '.' in files:
443 files = ['']
443 files = ['']
444 dmap = self._map
444 dmap = self._map
445
445
446 def imatch(file_):
446 def imatch(file_):
447 if file_ not in dmap and self._ignore(file_):
447 if file_ not in dmap and self._ignore(file_):
448 return False
448 return False
449 return match(file_)
449 return match(file_)
450
450
451 # TODO: don't walk unknown directories if unknown and ignored are False
451 # TODO: don't walk unknown directories if unknown and ignored are False
452 ignore = self._ignore
452 ignore = self._ignore
453 dirignore = self._dirignore
453 dirignore = self._dirignore
454 if ignored:
454 if ignored:
455 imatch = match
455 imatch = match
456 ignore = util.never
456 ignore = util.never
457 dirignore = util.never
457 dirignore = util.never
458
458
459 normpath = util.normpath
459 normpath = util.normpath
460 normalize = self.normalize
460 normalize = self.normalize
461 listdir = osutil.listdir
461 listdir = osutil.listdir
462 lstat = os.lstat
462 lstat = os.lstat
463 bisect_left = bisect.bisect_left
463 bisect_left = bisect.bisect_left
464 isdir = os.path.isdir
464 isdir = os.path.isdir
465 pconvert = util.pconvert
465 pconvert = util.pconvert
466 join = os.path.join
466 join = os.path.join
467 s_isdir = stat.S_ISDIR
467 s_isdir = stat.S_ISDIR
468 supported = self._supported
468 supported = self._supported
469 _join = self._join
469 _join = self._join
470 work = []
470 work = []
471 wadd = work.append
471 wadd = work.append
472 found = []
473 add = found.append
474
472
475 seen = {'.hg': 1}
473 seen = {'.hg': 1}
476
474
477 # step 1: find all explicit files
475 # step 1: find all explicit files
478 for ff in util.sort(files):
476 for ff in util.sort(files):
479 nf = normalize(normpath(ff))
477 nf = normalize(normpath(ff))
480 if nf in seen:
478 if nf in seen:
481 continue
479 continue
482
480
483 try:
481 try:
484 st = lstat(_join(nf))
482 st = lstat(_join(nf))
485 except OSError, inst:
483 except OSError, inst:
486 keep = False
484 keep = False
487 for fn in dmap:
485 for fn in dmap:
488 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
486 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
489 keep = True
487 keep = True
490 break
488 break
491 if not keep:
489 if not keep:
492 if inst.errno != errno.ENOENT:
490 if inst.errno != errno.ENOENT:
493 fwarn(ff, inst.strerror)
491 fwarn(ff, inst.strerror)
494 elif badfn(ff, inst.strerror) and imatch(nf):
492 elif badfn(ff, inst.strerror) and imatch(nf):
495 yield nf, None
493 yield nf, None
496 continue
494 continue
497
495
498 if s_isdir(st.st_mode):
496 if s_isdir(st.st_mode):
499 if not dirignore(nf):
497 if not dirignore(nf):
500 wadd(nf)
498 wadd(nf)
501 else:
499 else:
502 seen[nf] = 1
500 seen[nf] = 1
503 if supported(ff, st.st_mode, verbose=True):
501 if supported(ff, st.st_mode, verbose=True):
504 yield nf, st
502 yield nf, st
505 elif nf in dmap:
503 elif nf in dmap:
506 yield nf, None
504 yield nf, None
507
505
508 # step 2: visit subdirectories
506 # step 2: visit subdirectories
509 while work:
507 while work:
510 nd = work.pop()
508 nd = work.pop()
511 if hasattr(match, 'dir'):
509 if hasattr(match, 'dir'):
512 match.dir(nd)
510 match.dir(nd)
513 entries = listdir(_join(nd), stat=True)
511 entries = listdir(_join(nd), stat=True)
514 # nd is the top of the repository dir tree
512 # nd is the top of the repository dir tree
515 if nd == '.':
513 if nd == '.':
516 nd = ''
514 nd = ''
517 else:
515 else:
518 # do not recurse into a repo contained in this
516 # do not recurse into a repo contained in this
519 # one. use bisect to find .hg directory so speed
517 # one. use bisect to find .hg directory so speed
520 # is good on big directory.
518 # is good on big directory.
521 hg = bisect_left(entries, ('.hg'))
519 hg = bisect_left(entries, ('.hg'))
522 if hg < len(entries) and entries[hg][0] == '.hg' \
520 if hg < len(entries) and entries[hg][0] == '.hg' \
523 and entries[hg][1] == stat.S_IFDIR:
521 and entries[hg][1] == stat.S_IFDIR:
524 continue
522 continue
525 for f, kind, st in entries:
523 for f, kind, st in entries:
526 nf = normalize(pconvert(join(nd, f)))
524 nf = normalize(pconvert(join(nd, f)))
527 if nf in seen:
525 if nf in seen:
528 continue
526 continue
529 seen[nf] = 1
527 seen[nf] = 1
530 # don't trip over symlinks
528 # don't trip over symlinks
531 if kind == stat.S_IFDIR:
529 if kind == stat.S_IFDIR:
532 if not ignore(nf):
530 if not ignore(nf):
533 wadd(nf)
531 wadd(nf)
534 if nf in dmap and match(nf):
532 if nf in dmap and match(nf):
535 add((nf, None))
533 yield nf, None
536 elif imatch(nf):
534 elif imatch(nf):
537 if supported(nf, st.st_mode):
535 if supported(nf, st.st_mode):
538 add((nf, st))
536 yield nf, st
539 elif nf in dmap:
537 elif nf in dmap:
540 add((nf, None))
538 yield nf, None
541 for e in util.sort(found):
542 yield e
543
539
544 # step 3: report unseen items in the dmap hash
540 # step 3: report unseen items in the dmap hash
545 for f in util.sort(dmap):
541 for f in util.sort(dmap):
546 if f in seen or not match(f):
542 if f in seen or not match(f):
547 continue
543 continue
548 seen[f] = 1
544 seen[f] = 1
549 try:
545 try:
550 st = lstat(_join(f))
546 st = lstat(_join(f))
551 if supported(f, st.st_mode):
547 if supported(f, st.st_mode):
552 yield f, st
548 yield f, st
553 continue
549 continue
554 except OSError, inst:
550 except OSError, inst:
555 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
551 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
556 raise
552 raise
557 yield f, None
553 yield f, None
558
554
559 def status(self, match, ignored, clean, unknown):
555 def status(self, match, ignored, clean, unknown):
560 listignored, listclean, listunknown = ignored, clean, unknown
556 listignored, listclean, listunknown = ignored, clean, unknown
561 lookup, modified, added, unknown, ignored = [], [], [], [], []
557 lookup, modified, added, unknown, ignored = [], [], [], [], []
562 removed, deleted, clean = [], [], []
558 removed, deleted, clean = [], [], []
563
559
564 _join = self._join
560 _join = self._join
565 lstat = os.lstat
561 lstat = os.lstat
566 cmap = self._copymap
562 cmap = self._copymap
567 dmap = self._map
563 dmap = self._map
568 ladd = lookup.append
564 ladd = lookup.append
569 madd = modified.append
565 madd = modified.append
570 aadd = added.append
566 aadd = added.append
571 uadd = unknown.append
567 uadd = unknown.append
572 iadd = ignored.append
568 iadd = ignored.append
573 radd = removed.append
569 radd = removed.append
574 dadd = deleted.append
570 dadd = deleted.append
575 cadd = clean.append
571 cadd = clean.append
576
572
577 for fn, st in self.walk(match, listunknown, listignored):
573 for fn, st in self.walk(match, listunknown, listignored):
578 if fn not in dmap:
574 if fn not in dmap:
579 if (listignored or match.exact(fn)) and self._dirignore(fn):
575 if (listignored or match.exact(fn)) and self._dirignore(fn):
580 if listignored:
576 if listignored:
581 iadd(fn)
577 iadd(fn)
582 elif listunknown:
578 elif listunknown:
583 uadd(fn)
579 uadd(fn)
584 continue
580 continue
585
581
586 state, mode, size, time, foo = dmap[fn]
582 state, mode, size, time, foo = dmap[fn]
587
583
588 if not st and state in "nma":
584 if not st and state in "nma":
589 dadd(fn)
585 dadd(fn)
590 elif state == 'n':
586 elif state == 'n':
591 if (size >= 0 and
587 if (size >= 0 and
592 (size != st.st_size
588 (size != st.st_size
593 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
589 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
594 or size == -2
590 or size == -2
595 or fn in self._copymap):
591 or fn in self._copymap):
596 madd(fn)
592 madd(fn)
597 elif time != int(st.st_mtime):
593 elif time != int(st.st_mtime):
598 ladd(fn)
594 ladd(fn)
599 elif listclean:
595 elif listclean:
600 cadd(fn)
596 cadd(fn)
601 elif state == 'm':
597 elif state == 'm':
602 madd(fn)
598 madd(fn)
603 elif state == 'a':
599 elif state == 'a':
604 aadd(fn)
600 aadd(fn)
605 elif state == 'r':
601 elif state == 'r':
606 radd(fn)
602 radd(fn)
607
603
608 return (lookup, modified, added, removed, deleted, unknown, ignored,
604 return (lookup, modified, added, removed, deleted, unknown, ignored,
609 clean)
605 clean)
@@ -1,2077 +1,2076
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15
15
16 class localrepository(repo.repository):
16 class localrepository(repo.repository):
17 capabilities = util.set(('lookup', 'changegroupsubset'))
17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 supported = ('revlogv1', 'store')
18 supported = ('revlogv1', 'store')
19
19
20 def __init__(self, parentui, path=None, create=0):
20 def __init__(self, parentui, path=None, create=0):
21 repo.repository.__init__(self)
21 repo.repository.__init__(self)
22 self.root = os.path.realpath(path)
22 self.root = os.path.realpath(path)
23 self.path = os.path.join(self.root, ".hg")
23 self.path = os.path.join(self.root, ".hg")
24 self.origroot = path
24 self.origroot = path
25 self.opener = util.opener(self.path)
25 self.opener = util.opener(self.path)
26 self.wopener = util.opener(self.root)
26 self.wopener = util.opener(self.root)
27
27
28 if not os.path.isdir(self.path):
28 if not os.path.isdir(self.path):
29 if create:
29 if create:
30 if not os.path.exists(path):
30 if not os.path.exists(path):
31 os.mkdir(path)
31 os.mkdir(path)
32 os.mkdir(self.path)
32 os.mkdir(self.path)
33 requirements = ["revlogv1"]
33 requirements = ["revlogv1"]
34 if parentui.configbool('format', 'usestore', True):
34 if parentui.configbool('format', 'usestore', True):
35 os.mkdir(os.path.join(self.path, "store"))
35 os.mkdir(os.path.join(self.path, "store"))
36 requirements.append("store")
36 requirements.append("store")
37 # create an invalid changelog
37 # create an invalid changelog
38 self.opener("00changelog.i", "a").write(
38 self.opener("00changelog.i", "a").write(
39 '\0\0\0\2' # represents revlogv2
39 '\0\0\0\2' # represents revlogv2
40 ' dummy changelog to prevent using the old repo layout'
40 ' dummy changelog to prevent using the old repo layout'
41 )
41 )
42 reqfile = self.opener("requires", "w")
42 reqfile = self.opener("requires", "w")
43 for r in requirements:
43 for r in requirements:
44 reqfile.write("%s\n" % r)
44 reqfile.write("%s\n" % r)
45 reqfile.close()
45 reqfile.close()
46 else:
46 else:
47 raise repo.RepoError(_("repository %s not found") % path)
47 raise repo.RepoError(_("repository %s not found") % path)
48 elif create:
48 elif create:
49 raise repo.RepoError(_("repository %s already exists") % path)
49 raise repo.RepoError(_("repository %s already exists") % path)
50 else:
50 else:
51 # find requirements
51 # find requirements
52 try:
52 try:
53 requirements = self.opener("requires").read().splitlines()
53 requirements = self.opener("requires").read().splitlines()
54 except IOError, inst:
54 except IOError, inst:
55 if inst.errno != errno.ENOENT:
55 if inst.errno != errno.ENOENT:
56 raise
56 raise
57 requirements = []
57 requirements = []
58 # check them
58 # check them
59 for r in requirements:
59 for r in requirements:
60 if r not in self.supported:
60 if r not in self.supported:
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62
62
63 # setup store
63 # setup store
64 if "store" in requirements:
64 if "store" in requirements:
65 self.encodefn = util.encodefilename
65 self.encodefn = util.encodefilename
66 self.decodefn = util.decodefilename
66 self.decodefn = util.decodefilename
67 self.spath = os.path.join(self.path, "store")
67 self.spath = os.path.join(self.path, "store")
68 else:
68 else:
69 self.encodefn = lambda x: x
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
71 self.spath = self.path
72
72
73 try:
73 try:
74 # files in .hg/ will be created using this mode
74 # files in .hg/ will be created using this mode
75 mode = os.stat(self.spath).st_mode
75 mode = os.stat(self.spath).st_mode
76 # avoid some useless chmods
76 # avoid some useless chmods
77 if (0777 & ~util._umask) == (0777 & mode):
77 if (0777 & ~util._umask) == (0777 & mode):
78 mode = None
78 mode = None
79 except OSError:
79 except OSError:
80 mode = None
80 mode = None
81
81
82 self._createmode = mode
82 self._createmode = mode
83 self.opener.createmode = mode
83 self.opener.createmode = mode
84 sopener = util.opener(self.spath)
84 sopener = util.opener(self.spath)
85 sopener.createmode = mode
85 sopener.createmode = mode
86 self.sopener = util.encodedopener(sopener, self.encodefn)
86 self.sopener = util.encodedopener(sopener, self.encodefn)
87
87
88 self.ui = ui.ui(parentui=parentui)
88 self.ui = ui.ui(parentui=parentui)
89 try:
89 try:
90 self.ui.readconfig(self.join("hgrc"), self.root)
90 self.ui.readconfig(self.join("hgrc"), self.root)
91 extensions.loadall(self.ui)
91 extensions.loadall(self.ui)
92 except IOError:
92 except IOError:
93 pass
93 pass
94
94
95 self.tagscache = None
95 self.tagscache = None
96 self._tagstypecache = None
96 self._tagstypecache = None
97 self.branchcache = None
97 self.branchcache = None
98 self._ubranchcache = None # UTF-8 version of branchcache
98 self._ubranchcache = None # UTF-8 version of branchcache
99 self._branchcachetip = None
99 self._branchcachetip = None
100 self.nodetagscache = None
100 self.nodetagscache = None
101 self.filterpats = {}
101 self.filterpats = {}
102 self._datafilters = {}
102 self._datafilters = {}
103 self._transref = self._lockref = self._wlockref = None
103 self._transref = self._lockref = self._wlockref = None
104
104
105 def __getattr__(self, name):
105 def __getattr__(self, name):
106 if name == 'changelog':
106 if name == 'changelog':
107 self.changelog = changelog.changelog(self.sopener)
107 self.changelog = changelog.changelog(self.sopener)
108 self.sopener.defversion = self.changelog.version
108 self.sopener.defversion = self.changelog.version
109 return self.changelog
109 return self.changelog
110 if name == 'manifest':
110 if name == 'manifest':
111 self.changelog
111 self.changelog
112 self.manifest = manifest.manifest(self.sopener)
112 self.manifest = manifest.manifest(self.sopener)
113 return self.manifest
113 return self.manifest
114 if name == 'dirstate':
114 if name == 'dirstate':
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 return self.dirstate
116 return self.dirstate
117 else:
117 else:
118 raise AttributeError, name
118 raise AttributeError, name
119
119
120 def __getitem__(self, changeid):
120 def __getitem__(self, changeid):
121 if changeid == None:
121 if changeid == None:
122 return context.workingctx(self)
122 return context.workingctx(self)
123 return context.changectx(self, changeid)
123 return context.changectx(self, changeid)
124
124
125 def __nonzero__(self):
125 def __nonzero__(self):
126 return True
126 return True
127
127
128 def __len__(self):
128 def __len__(self):
129 return len(self.changelog)
129 return len(self.changelog)
130
130
131 def __iter__(self):
131 def __iter__(self):
132 for i in xrange(len(self)):
132 for i in xrange(len(self)):
133 yield i
133 yield i
134
134
135 def url(self):
135 def url(self):
136 return 'file:' + self.root
136 return 'file:' + self.root
137
137
138 def hook(self, name, throw=False, **args):
138 def hook(self, name, throw=False, **args):
139 return hook.hook(self.ui, self, name, throw, **args)
139 return hook.hook(self.ui, self, name, throw, **args)
140
140
141 tag_disallowed = ':\r\n'
141 tag_disallowed = ':\r\n'
142
142
143 def _tag(self, names, node, message, local, user, date, parent=None,
143 def _tag(self, names, node, message, local, user, date, parent=None,
144 extra={}):
144 extra={}):
145 use_dirstate = parent is None
145 use_dirstate = parent is None
146
146
147 if isinstance(names, str):
147 if isinstance(names, str):
148 allchars = names
148 allchars = names
149 names = (names,)
149 names = (names,)
150 else:
150 else:
151 allchars = ''.join(names)
151 allchars = ''.join(names)
152 for c in self.tag_disallowed:
152 for c in self.tag_disallowed:
153 if c in allchars:
153 if c in allchars:
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
155
155
156 for name in names:
156 for name in names:
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
158 local=local)
158 local=local)
159
159
160 def writetags(fp, names, munge, prevtags):
160 def writetags(fp, names, munge, prevtags):
161 fp.seek(0, 2)
161 fp.seek(0, 2)
162 if prevtags and prevtags[-1] != '\n':
162 if prevtags and prevtags[-1] != '\n':
163 fp.write('\n')
163 fp.write('\n')
164 for name in names:
164 for name in names:
165 m = munge and munge(name) or name
165 m = munge and munge(name) or name
166 if self._tagstypecache and name in self._tagstypecache:
166 if self._tagstypecache and name in self._tagstypecache:
167 old = self.tagscache.get(name, nullid)
167 old = self.tagscache.get(name, nullid)
168 fp.write('%s %s\n' % (hex(old), m))
168 fp.write('%s %s\n' % (hex(old), m))
169 fp.write('%s %s\n' % (hex(node), m))
169 fp.write('%s %s\n' % (hex(node), m))
170 fp.close()
170 fp.close()
171
171
172 prevtags = ''
172 prevtags = ''
173 if local:
173 if local:
174 try:
174 try:
175 fp = self.opener('localtags', 'r+')
175 fp = self.opener('localtags', 'r+')
176 except IOError, err:
176 except IOError, err:
177 fp = self.opener('localtags', 'a')
177 fp = self.opener('localtags', 'a')
178 else:
178 else:
179 prevtags = fp.read()
179 prevtags = fp.read()
180
180
181 # local tags are stored in the current charset
181 # local tags are stored in the current charset
182 writetags(fp, names, None, prevtags)
182 writetags(fp, names, None, prevtags)
183 for name in names:
183 for name in names:
184 self.hook('tag', node=hex(node), tag=name, local=local)
184 self.hook('tag', node=hex(node), tag=name, local=local)
185 return
185 return
186
186
187 if use_dirstate:
187 if use_dirstate:
188 try:
188 try:
189 fp = self.wfile('.hgtags', 'rb+')
189 fp = self.wfile('.hgtags', 'rb+')
190 except IOError, err:
190 except IOError, err:
191 fp = self.wfile('.hgtags', 'ab')
191 fp = self.wfile('.hgtags', 'ab')
192 else:
192 else:
193 prevtags = fp.read()
193 prevtags = fp.read()
194 else:
194 else:
195 try:
195 try:
196 prevtags = self.filectx('.hgtags', parent).data()
196 prevtags = self.filectx('.hgtags', parent).data()
197 except revlog.LookupError:
197 except revlog.LookupError:
198 pass
198 pass
199 fp = self.wfile('.hgtags', 'wb')
199 fp = self.wfile('.hgtags', 'wb')
200 if prevtags:
200 if prevtags:
201 fp.write(prevtags)
201 fp.write(prevtags)
202
202
203 # committed tags are stored in UTF-8
203 # committed tags are stored in UTF-8
204 writetags(fp, names, util.fromlocal, prevtags)
204 writetags(fp, names, util.fromlocal, prevtags)
205
205
206 if use_dirstate and '.hgtags' not in self.dirstate:
206 if use_dirstate and '.hgtags' not in self.dirstate:
207 self.add(['.hgtags'])
207 self.add(['.hgtags'])
208
208
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
210 extra=extra)
210 extra=extra)
211
211
212 for name in names:
212 for name in names:
213 self.hook('tag', node=hex(node), tag=name, local=local)
213 self.hook('tag', node=hex(node), tag=name, local=local)
214
214
215 return tagnode
215 return tagnode
216
216
217 def tag(self, names, node, message, local, user, date):
217 def tag(self, names, node, message, local, user, date):
218 '''tag a revision with one or more symbolic names.
218 '''tag a revision with one or more symbolic names.
219
219
220 names is a list of strings or, when adding a single tag, names may be a
220 names is a list of strings or, when adding a single tag, names may be a
221 string.
221 string.
222
222
223 if local is True, the tags are stored in a per-repository file.
223 if local is True, the tags are stored in a per-repository file.
224 otherwise, they are stored in the .hgtags file, and a new
224 otherwise, they are stored in the .hgtags file, and a new
225 changeset is committed with the change.
225 changeset is committed with the change.
226
226
227 keyword arguments:
227 keyword arguments:
228
228
229 local: whether to store tags in non-version-controlled file
229 local: whether to store tags in non-version-controlled file
230 (default False)
230 (default False)
231
231
232 message: commit message to use if committing
232 message: commit message to use if committing
233
233
234 user: name of user to use if committing
234 user: name of user to use if committing
235
235
236 date: date tuple to use if committing'''
236 date: date tuple to use if committing'''
237
237
238 for x in self.status()[:5]:
238 for x in self.status()[:5]:
239 if '.hgtags' in x:
239 if '.hgtags' in x:
240 raise util.Abort(_('working copy of .hgtags is changed '
240 raise util.Abort(_('working copy of .hgtags is changed '
241 '(please commit .hgtags manually)'))
241 '(please commit .hgtags manually)'))
242
242
243 self._tag(names, node, message, local, user, date)
243 self._tag(names, node, message, local, user, date)
244
244
245 def tags(self):
245 def tags(self):
246 '''return a mapping of tag to node'''
246 '''return a mapping of tag to node'''
247 if self.tagscache:
247 if self.tagscache:
248 return self.tagscache
248 return self.tagscache
249
249
250 globaltags = {}
250 globaltags = {}
251 tagtypes = {}
251 tagtypes = {}
252
252
253 def readtags(lines, fn, tagtype):
253 def readtags(lines, fn, tagtype):
254 filetags = {}
254 filetags = {}
255 count = 0
255 count = 0
256
256
257 def warn(msg):
257 def warn(msg):
258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
259
259
260 for l in lines:
260 for l in lines:
261 count += 1
261 count += 1
262 if not l:
262 if not l:
263 continue
263 continue
264 s = l.split(" ", 1)
264 s = l.split(" ", 1)
265 if len(s) != 2:
265 if len(s) != 2:
266 warn(_("cannot parse entry"))
266 warn(_("cannot parse entry"))
267 continue
267 continue
268 node, key = s
268 node, key = s
269 key = util.tolocal(key.strip()) # stored in UTF-8
269 key = util.tolocal(key.strip()) # stored in UTF-8
270 try:
270 try:
271 bin_n = bin(node)
271 bin_n = bin(node)
272 except TypeError:
272 except TypeError:
273 warn(_("node '%s' is not well formed") % node)
273 warn(_("node '%s' is not well formed") % node)
274 continue
274 continue
275 if bin_n not in self.changelog.nodemap:
275 if bin_n not in self.changelog.nodemap:
276 warn(_("tag '%s' refers to unknown node") % key)
276 warn(_("tag '%s' refers to unknown node") % key)
277 continue
277 continue
278
278
279 h = []
279 h = []
280 if key in filetags:
280 if key in filetags:
281 n, h = filetags[key]
281 n, h = filetags[key]
282 h.append(n)
282 h.append(n)
283 filetags[key] = (bin_n, h)
283 filetags[key] = (bin_n, h)
284
284
285 for k, nh in filetags.items():
285 for k, nh in filetags.items():
286 if k not in globaltags:
286 if k not in globaltags:
287 globaltags[k] = nh
287 globaltags[k] = nh
288 tagtypes[k] = tagtype
288 tagtypes[k] = tagtype
289 continue
289 continue
290
290
291 # we prefer the global tag if:
291 # we prefer the global tag if:
292 # it supercedes us OR
292 # it supercedes us OR
293 # mutual supercedes and it has a higher rank
293 # mutual supercedes and it has a higher rank
294 # otherwise we win because we're tip-most
294 # otherwise we win because we're tip-most
295 an, ah = nh
295 an, ah = nh
296 bn, bh = globaltags[k]
296 bn, bh = globaltags[k]
297 if (bn != an and an in bh and
297 if (bn != an and an in bh and
298 (bn not in ah or len(bh) > len(ah))):
298 (bn not in ah or len(bh) > len(ah))):
299 an = bn
299 an = bn
300 ah.extend([n for n in bh if n not in ah])
300 ah.extend([n for n in bh if n not in ah])
301 globaltags[k] = an, ah
301 globaltags[k] = an, ah
302 tagtypes[k] = tagtype
302 tagtypes[k] = tagtype
303
303
304 # read the tags file from each head, ending with the tip
304 # read the tags file from each head, ending with the tip
305 f = None
305 f = None
306 for rev, node, fnode in self._hgtagsnodes():
306 for rev, node, fnode in self._hgtagsnodes():
307 f = (f and f.filectx(fnode) or
307 f = (f and f.filectx(fnode) or
308 self.filectx('.hgtags', fileid=fnode))
308 self.filectx('.hgtags', fileid=fnode))
309 readtags(f.data().splitlines(), f, "global")
309 readtags(f.data().splitlines(), f, "global")
310
310
311 try:
311 try:
312 data = util.fromlocal(self.opener("localtags").read())
312 data = util.fromlocal(self.opener("localtags").read())
313 # localtags are stored in the local character set
313 # localtags are stored in the local character set
314 # while the internal tag table is stored in UTF-8
314 # while the internal tag table is stored in UTF-8
315 readtags(data.splitlines(), "localtags", "local")
315 readtags(data.splitlines(), "localtags", "local")
316 except IOError:
316 except IOError:
317 pass
317 pass
318
318
319 self.tagscache = {}
319 self.tagscache = {}
320 self._tagstypecache = {}
320 self._tagstypecache = {}
321 for k,nh in globaltags.items():
321 for k,nh in globaltags.items():
322 n = nh[0]
322 n = nh[0]
323 if n != nullid:
323 if n != nullid:
324 self.tagscache[k] = n
324 self.tagscache[k] = n
325 self._tagstypecache[k] = tagtypes[k]
325 self._tagstypecache[k] = tagtypes[k]
326 self.tagscache['tip'] = self.changelog.tip()
326 self.tagscache['tip'] = self.changelog.tip()
327 return self.tagscache
327 return self.tagscache
328
328
329 def tagtype(self, tagname):
329 def tagtype(self, tagname):
330 '''
330 '''
331 return the type of the given tag. result can be:
331 return the type of the given tag. result can be:
332
332
333 'local' : a local tag
333 'local' : a local tag
334 'global' : a global tag
334 'global' : a global tag
335 None : tag does not exist
335 None : tag does not exist
336 '''
336 '''
337
337
338 self.tags()
338 self.tags()
339
339
340 return self._tagstypecache.get(tagname)
340 return self._tagstypecache.get(tagname)
341
341
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for each head's .hgtags file.

        Heads are visited oldest first so that later (tip-most) entries
        win; heads whose .hgtags filenode was already seen replace the
        earlier entry instead of being reported twice.
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # duplicate filenode: drop the earlier occurrence
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        # filter out the entries nulled above
        return [item for item in ret if item]
359
359
360 def tagslist(self):
360 def tagslist(self):
361 '''return a list of tags ordered by revision'''
361 '''return a list of tags ordered by revision'''
362 l = []
362 l = []
363 for t, n in self.tags().items():
363 for t, n in self.tags().items():
364 try:
364 try:
365 r = self.changelog.rev(n)
365 r = self.changelog.rev(n)
366 except:
366 except:
367 r = -2 # sort to the beginning of the list if unknown
367 r = -2 # sort to the beginning of the list if unknown
368 l.append((r, t, n))
368 l.append((r, t, n))
369 return [(t, n) for r, t, n in util.sort(l)]
369 return [(t, n) for r, t, n in util.sort(l)]
370
370
371 def nodetags(self, node):
371 def nodetags(self, node):
372 '''return the tags associated with a node'''
372 '''return the tags associated with a node'''
373 if not self.nodetagscache:
373 if not self.nodetagscache:
374 self.nodetagscache = {}
374 self.nodetagscache = {}
375 for t, n in self.tags().items():
375 for t, n in self.tags().items():
376 self.nodetagscache.setdefault(n, []).append(t)
376 self.nodetagscache.setdefault(n, []).append(t)
377 return self.nodetagscache.get(node, [])
377 return self.nodetagscache.get(node, [])
378
378
379 def _branchtags(self, partial, lrev):
379 def _branchtags(self, partial, lrev):
380 tiprev = len(self) - 1
380 tiprev = len(self) - 1
381 if lrev != tiprev:
381 if lrev != tiprev:
382 self._updatebranchcache(partial, lrev+1, tiprev+1)
382 self._updatebranchcache(partial, lrev+1, tiprev+1)
383 self._writebranchcache(partial, self.changelog.tip(), tiprev)
383 self._writebranchcache(partial, self.changelog.tip(), tiprev)
384
384
385 return partial
385 return partial
386
386
    def branchtags(self):
        """Return a dict mapping branch name -> node of its tip-most changeset.

        The result is cached in self.branchcache and is valid while the
        changelog tip is unchanged.
        """
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # first call, or the previously cached tip vanished (e.g.
            # after a strip): rebuild from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incrementally update from the in-memory cache
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
412
412
    def _readbranchcache(self):
        """Load the on-disk branch cache.

        Returns (partial, last, lrev): partial maps branch name -> node,
        and last/lrev identify the tip the cache was valid for.  Any
        read or validation failure yields an empty cache instead of an
        error.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line is "<hex tip node> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines are "<hex node> <branch name>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: report when debugging, rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
439
439
440 def _writebranchcache(self, branches, tip, tiprev):
440 def _writebranchcache(self, branches, tip, tiprev):
441 try:
441 try:
442 f = self.opener("branch.cache", "w", atomictemp=True)
442 f = self.opener("branch.cache", "w", atomictemp=True)
443 f.write("%s %s\n" % (hex(tip), tiprev))
443 f.write("%s %s\n" % (hex(tip), tiprev))
444 for label, node in branches.iteritems():
444 for label, node in branches.iteritems():
445 f.write("%s %s\n" % (hex(node), label))
445 f.write("%s %s\n" % (hex(node), label))
446 f.rename()
446 f.rename()
447 except (IOError, OSError):
447 except (IOError, OSError):
448 pass
448 pass
449
449
450 def _updatebranchcache(self, partial, start, end):
450 def _updatebranchcache(self, partial, start, end):
451 for r in xrange(start, end):
451 for r in xrange(start, end):
452 c = self[r]
452 c = self[r]
453 b = c.branch()
453 b = c.branch()
454 partial[b] = c.node()
454 partial[b] = c.node()
455
455
456 def lookup(self, key):
456 def lookup(self, key):
457 if key == '.':
457 if key == '.':
458 return self.dirstate.parents()[0]
458 return self.dirstate.parents()[0]
459 elif key == 'null':
459 elif key == 'null':
460 return nullid
460 return nullid
461 n = self.changelog._match(key)
461 n = self.changelog._match(key)
462 if n:
462 if n:
463 return n
463 return n
464 if key in self.tags():
464 if key in self.tags():
465 return self.tags()[key]
465 return self.tags()[key]
466 if key in self.branchtags():
466 if key in self.branchtags():
467 return self.branchtags()[key]
467 return self.branchtags()[key]
468 n = self.changelog._partialmatch(key)
468 n = self.changelog._partialmatch(key)
469 if n:
469 if n:
470 return n
470 return n
471 try:
471 try:
472 if len(key) == 20:
472 if len(key) == 20:
473 key = hex(key)
473 key = hex(key)
474 except:
474 except:
475 pass
475 pass
476 raise repo.RepoError(_("unknown revision '%s'") % key)
476 raise repo.RepoError(_("unknown revision '%s'") % key)
477
477
478 def local(self):
478 def local(self):
479 return True
479 return True
480
480
481 def join(self, f):
481 def join(self, f):
482 return os.path.join(self.path, f)
482 return os.path.join(self.path, f)
483
483
484 def sjoin(self, f):
484 def sjoin(self, f):
485 f = self.encodefn(f)
485 f = self.encodefn(f)
486 return os.path.join(self.spath, f)
486 return os.path.join(self.spath, f)
487
487
488 def wjoin(self, f):
488 def wjoin(self, f):
489 return os.path.join(self.root, f)
489 return os.path.join(self.root, f)
490
490
491 def rjoin(self, f):
491 def rjoin(self, f):
492 return os.path.join(self.root, util.pconvert(f))
492 return os.path.join(self.root, util.pconvert(f))
493
493
494 def file(self, f):
494 def file(self, f):
495 if f[0] == '/':
495 if f[0] == '/':
496 f = f[1:]
496 f = f[1:]
497 return filelog.filelog(self.sopener, f)
497 return filelog.filelog(self.sopener, f)
498
498
499 def changectx(self, changeid):
499 def changectx(self, changeid):
500 return self[changeid]
500 return self[changeid]
501
501
502 def parents(self, changeid=None):
502 def parents(self, changeid=None):
503 '''get list of changectxs for parents of changeid'''
503 '''get list of changectxs for parents of changeid'''
504 return self[changeid].parents()
504 return self[changeid].parents()
505
505
506 def filectx(self, path, changeid=None, fileid=None):
506 def filectx(self, path, changeid=None, fileid=None):
507 """changeid can be a changeset revision, node, or tag.
507 """changeid can be a changeset revision, node, or tag.
508 fileid can be a file revision or node."""
508 fileid can be a file revision or node."""
509 return context.filectx(self, path, changeid, fileid)
509 return context.filectx(self, path, changeid, fileid)
510
510
511 def getcwd(self):
511 def getcwd(self):
512 return self.dirstate.getcwd()
512 return self.dirstate.getcwd()
513
513
514 def pathto(self, f, cwd=None):
514 def pathto(self, f, cwd=None):
515 return self.dirstate.pathto(f, cwd)
515 return self.dirstate.pathto(f, cwd)
516
516
517 def wfile(self, f, mode='r'):
517 def wfile(self, f, mode='r'):
518 return self.wopener(f, mode)
518 return self.wopener(f, mode)
519
519
520 def _link(self, f):
520 def _link(self, f):
521 return os.path.islink(self.wjoin(f))
521 return os.path.islink(self.wjoin(f))
522
522
    def _filter(self, filter, filename, data):
        """Run data through the configured filters for the given section.

        filter is a config section name (e.g. "encode" or "decode") whose
        entries map file patterns to either a registered in-process data
        filter or an external command.  Compiled (matcher, fn, params)
        triples are cached per section in self.filterpats; the first
        pattern matching filename wins.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # a command starting with a registered filter name uses
                # that in-process filter; the rest of cmd is its params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise pipe the data through the external command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
551
551
552 def adddatafilter(self, name, filter):
552 def adddatafilter(self, name, filter):
553 self._datafilters[name] = filter
553 self._datafilters[name] = filter
554
554
555 def wread(self, filename):
555 def wread(self, filename):
556 if self._link(filename):
556 if self._link(filename):
557 data = os.readlink(self.wjoin(filename))
557 data = os.readlink(self.wjoin(filename))
558 else:
558 else:
559 data = self.wopener(filename, 'r').read()
559 data = self.wopener(filename, 'r').read()
560 return self._filter("encode", filename, data)
560 return self._filter("encode", filename, data)
561
561
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory.

        Decode filters are applied first, then flags are set on the new
        file via util.set_flags.
        """
        data = self._filter("decode", filename, data)
        # remove any existing file before writing -- presumably so a
        # pre-existing symlink is not followed; TODO confirm intent
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            # file did not exist; nothing to remove
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)
570
570
571 def wwritedata(self, filename, data):
571 def wwritedata(self, filename, data):
572 return self._filter("decode", filename, data)
572 return self._filter("decode", filename, data)
573
573
    def transaction(self):
        """Open a new transaction, or nest into the one already running.

        The dirstate and branch are journaled so the transaction can be
        rolled back; on success the journal files are renamed to undo
        files (see aftertrans).
        """
        if self._transref and self._transref():
            # join the live transaction instead of starting a new one
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on close, journal.* become the undo.* files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        # weak reference only, so the transaction can be collected/closed
        self._transref = weakref.ref(tr)
        return tr
599
599
    def recover(self):
        """Roll back an interrupted transaction, if one left a journal.

        Returns True when a journal was found and rolled back, False
        otherwise.  Takes the store lock.
        """
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                # cached repository state is stale after the rollback
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l
613
613
    def rollback(self):
        """Undo the last transaction, restoring dirstate and branch.

        Takes the working-directory lock and then the store lock; both
        are released via del in the finally clause.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing/unreadable: warn but continue
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                # drop all cached state made stale by the rollback
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
636
636
637 def invalidate(self):
637 def invalidate(self):
638 for a in "changelog manifest".split():
638 for a in "changelog manifest".split():
639 if a in self.__dict__:
639 if a in self.__dict__:
640 delattr(self, a)
640 delattr(self, a)
641 self.tagscache = None
641 self.tagscache = None
642 self._tagstypecache = None
642 self._tagstypecache = None
643 self.nodetagscache = None
643 self.nodetagscache = None
644 self.branchcache = None
644 self.branchcache = None
645 self._ubranchcache = None
645 self._ubranchcache = None
646 self._branchcachetip = None
646 self._branchcachetip = None
647
647
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file lockname.

        If the lock is held and wait is false, re-raise LockHeld;
        otherwise warn and retry with the configured ui.timeout.
        acquirefn, if given, runs after the lock is obtained.  Returns
        the lock object.
        """
        try:
            # first attempt: zero timeout, fail fast if held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
662
662
663 def lock(self, wait=True):
663 def lock(self, wait=True):
664 if self._lockref and self._lockref():
664 if self._lockref and self._lockref():
665 return self._lockref()
665 return self._lockref()
666
666
667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
668 _('repository %s') % self.origroot)
668 _('repository %s') % self.origroot)
669 self._lockref = weakref.ref(l)
669 self._lockref = weakref.ref(l)
670 return l
670 return l
671
671
672 def wlock(self, wait=True):
672 def wlock(self, wait=True):
673 if self._wlockref and self._wlockref():
673 if self._wlockref and self._wlockref():
674 return self._wlockref()
674 return self._wlockref()
675
675
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
677 self.dirstate.invalidate, _('working directory of %s') %
678 self.origroot)
678 self.origroot)
679 self._wlockref = weakref.ref(l)
679 self._wlockref = weakref.ref(l)
680 return l
680 return l
681
681
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        manifests of the two parents.  Returns the new filelog node, or
        the existing parent node when the file is unchanged.  Files that
        were actually committed are appended to changelist.
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            cp = cp[0]
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1[cp])
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1[cp])
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2[cp])
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1[cp])
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
745
745
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        """Commit the given files against explicit parents.

        If p1 is not given, the dirstate parents are used.  Empty
        commits are allowed on this path (empty_ok=True).
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)
751
751
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit changes and return the new changeset node.

        When p1 is None (the normal case) the dirstate parents are used
        and a dirstate update is requested; otherwise this is the raw
        path committing against the given parents.  Takes the wlock and
        then the store lock for the duration.
        """
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                # a merge commit must include all changed files
                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    # explicit file list: classify by dirstate state
                    # (n=normal, m=merged, a=added, r=removed)
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                # raw path: only update the dirstate if it already points
                # at the first parent we are committing against
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock
796
796
    def commitctx(self, ctx):
        """Commit a prebuilt change context.

        The commit is forced and may be empty; the dirstate is neither
        consulted nor updated.  Takes the wlock and the store lock.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock
807
807
808 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
808 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
809 use_dirstate=True, update_dirstate=True):
809 use_dirstate=True, update_dirstate=True):
810 tr = None
810 tr = None
811 valid = 0 # don't save the dirstate if this isn't set
811 valid = 0 # don't save the dirstate if this isn't set
812 try:
812 try:
813 commit = util.sort(wctx.modified() + wctx.added())
813 commit = util.sort(wctx.modified() + wctx.added())
814 remove = wctx.removed()
814 remove = wctx.removed()
815 extra = wctx.extra().copy()
815 extra = wctx.extra().copy()
816 branchname = extra['branch']
816 branchname = extra['branch']
817 user = wctx.user()
817 user = wctx.user()
818 text = wctx.description()
818 text = wctx.description()
819
819
820 p1, p2 = [p.node() for p in wctx.parents()]
820 p1, p2 = [p.node() for p in wctx.parents()]
821 c1 = self.changelog.read(p1)
821 c1 = self.changelog.read(p1)
822 c2 = self.changelog.read(p2)
822 c2 = self.changelog.read(p2)
823 m1 = self.manifest.read(c1[0]).copy()
823 m1 = self.manifest.read(c1[0]).copy()
824 m2 = self.manifest.read(c2[0])
824 m2 = self.manifest.read(c2[0])
825
825
826 if use_dirstate:
826 if use_dirstate:
827 oldname = c1[5].get("branch") # stored in UTF-8
827 oldname = c1[5].get("branch") # stored in UTF-8
828 if (not commit and not remove and not force and p2 == nullid
828 if (not commit and not remove and not force and p2 == nullid
829 and branchname == oldname):
829 and branchname == oldname):
830 self.ui.status(_("nothing changed\n"))
830 self.ui.status(_("nothing changed\n"))
831 return None
831 return None
832
832
833 xp1 = hex(p1)
833 xp1 = hex(p1)
834 if p2 == nullid: xp2 = ''
834 if p2 == nullid: xp2 = ''
835 else: xp2 = hex(p2)
835 else: xp2 = hex(p2)
836
836
837 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
837 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
838
838
839 tr = self.transaction()
839 tr = self.transaction()
840 trp = weakref.proxy(tr)
840 trp = weakref.proxy(tr)
841
841
842 # check in files
842 # check in files
843 new = {}
843 new = {}
844 changed = []
844 changed = []
845 linkrev = len(self)
845 linkrev = len(self)
846 for f in commit:
846 for f in commit:
847 self.ui.note(f + "\n")
847 self.ui.note(f + "\n")
848 try:
848 try:
849 fctx = wctx.filectx(f)
849 fctx = wctx.filectx(f)
850 newflags = fctx.flags()
850 newflags = fctx.flags()
851 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
851 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
852 if ((not changed or changed[-1] != f) and
852 if ((not changed or changed[-1] != f) and
853 m2.get(f) != new[f]):
853 m2.get(f) != new[f]):
854 # mention the file in the changelog if some
854 # mention the file in the changelog if some
855 # flag changed, even if there was no content
855 # flag changed, even if there was no content
856 # change.
856 # change.
857 if m1.flags(f) != newflags:
857 if m1.flags(f) != newflags:
858 changed.append(f)
858 changed.append(f)
859 m1.set(f, newflags)
859 m1.set(f, newflags)
860 if use_dirstate:
860 if use_dirstate:
861 self.dirstate.normal(f)
861 self.dirstate.normal(f)
862
862
863 except (OSError, IOError):
863 except (OSError, IOError):
864 if use_dirstate:
864 if use_dirstate:
865 self.ui.warn(_("trouble committing %s!\n") % f)
865 self.ui.warn(_("trouble committing %s!\n") % f)
866 raise
866 raise
867 else:
867 else:
868 remove.append(f)
868 remove.append(f)
869
869
870 # update manifest
870 # update manifest
871 m1.update(new)
871 m1.update(new)
872 removed = []
872 removed = []
873
873
874 for f in util.sort(remove):
874 for f in util.sort(remove):
875 if f in m1:
875 if f in m1:
876 del m1[f]
876 del m1[f]
877 removed.append(f)
877 removed.append(f)
878 elif f in m2:
878 elif f in m2:
879 removed.append(f)
879 removed.append(f)
880 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
880 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
881 (new, removed))
881 (new, removed))
882
882
883 # add changeset
883 # add changeset
884 if (not empty_ok and not text) or force_editor:
884 if (not empty_ok and not text) or force_editor:
885 edittext = []
885 edittext = []
886 if text:
886 if text:
887 edittext.append(text)
887 edittext.append(text)
888 edittext.append("")
888 edittext.append("")
889 edittext.append(_("HG: Enter commit message."
889 edittext.append(_("HG: Enter commit message."
890 " Lines beginning with 'HG:' are removed."))
890 " Lines beginning with 'HG:' are removed."))
891 edittext.append("HG: --")
891 edittext.append("HG: --")
892 edittext.append("HG: user: %s" % user)
892 edittext.append("HG: user: %s" % user)
893 if p2 != nullid:
893 if p2 != nullid:
894 edittext.append("HG: branch merge")
894 edittext.append("HG: branch merge")
895 if branchname:
895 if branchname:
896 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
896 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
897 edittext.extend(["HG: changed %s" % f for f in changed])
897 edittext.extend(["HG: changed %s" % f for f in changed])
898 edittext.extend(["HG: removed %s" % f for f in removed])
898 edittext.extend(["HG: removed %s" % f for f in removed])
899 if not changed and not remove:
899 if not changed and not remove:
900 edittext.append("HG: no files changed")
900 edittext.append("HG: no files changed")
901 edittext.append("")
901 edittext.append("")
902 # run editor in the repository root
902 # run editor in the repository root
903 olddir = os.getcwd()
903 olddir = os.getcwd()
904 os.chdir(self.root)
904 os.chdir(self.root)
905 text = self.ui.edit("\n".join(edittext), user)
905 text = self.ui.edit("\n".join(edittext), user)
906 os.chdir(olddir)
906 os.chdir(olddir)
907
907
908 lines = [line.rstrip() for line in text.rstrip().splitlines()]
908 lines = [line.rstrip() for line in text.rstrip().splitlines()]
909 while lines and not lines[0]:
909 while lines and not lines[0]:
910 del lines[0]
910 del lines[0]
911 if not lines and use_dirstate:
911 if not lines and use_dirstate:
912 raise util.Abort(_("empty commit message"))
912 raise util.Abort(_("empty commit message"))
913 text = '\n'.join(lines)
913 text = '\n'.join(lines)
914
914
915 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
915 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
916 user, wctx.date(), extra)
916 user, wctx.date(), extra)
917 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
917 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
918 parent2=xp2)
918 parent2=xp2)
919 tr.close()
919 tr.close()
920
920
921 if self.branchcache:
921 if self.branchcache:
922 self.branchtags()
922 self.branchtags()
923
923
924 if use_dirstate or update_dirstate:
924 if use_dirstate or update_dirstate:
925 self.dirstate.setparents(n)
925 self.dirstate.setparents(n)
926 if use_dirstate:
926 if use_dirstate:
927 for f in removed:
927 for f in removed:
928 self.dirstate.forget(f)
928 self.dirstate.forget(f)
929 valid = 1 # our dirstate updates are complete
929 valid = 1 # our dirstate updates are complete
930
930
931 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
931 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
932 return n
932 return n
933 finally:
933 finally:
934 if not valid: # don't save our updated dirstate
934 if not valid: # don't save our updated dirstate
935 self.dirstate.invalidate()
935 self.dirstate.invalidate()
936 del tr
936 del tr
937
937
938 def walk(self, match, node=None):
938 def walk(self, match, node=None):
939 '''
939 '''
940 walk recursively through the directory tree or a given
940 walk recursively through the directory tree or a given
941 changeset, finding all files matched by the match
941 changeset, finding all files matched by the match
942 function
942 function
943 '''
943 '''
944 return self[node].walk(match)
944 return self[node].walk(match)
945
945
946 def status(self, node1='.', node2=None, match=None,
946 def status(self, node1='.', node2=None, match=None,
947 ignored=False, clean=False, unknown=False):
947 ignored=False, clean=False, unknown=False):
948 """return status of files between two nodes or node and working directory
948 """return status of files between two nodes or node and working directory
949
949
950 If node1 is None, use the first dirstate parent instead.
950 If node1 is None, use the first dirstate parent instead.
951 If node2 is None, compare node1 with working directory.
951 If node2 is None, compare node1 with working directory.
952 """
952 """
953
953
954 def mfmatches(ctx):
954 def mfmatches(ctx):
955 mf = ctx.manifest().copy()
955 mf = ctx.manifest().copy()
956 for fn in mf.keys():
956 for fn in mf.keys():
957 if not match(fn):
957 if not match(fn):
958 del mf[fn]
958 del mf[fn]
959 return mf
959 return mf
960
960
961 ctx1 = self[node1]
961 ctx1 = self[node1]
962 ctx2 = self[node2]
962 ctx2 = self[node2]
963 working = ctx2 == self[None]
963 working = ctx2 == self[None]
964 parentworking = working and ctx1 == self['.']
964 parentworking = working and ctx1 == self['.']
965 match = match or match_.always(self.root, self.getcwd())
965 match = match or match_.always(self.root, self.getcwd())
966 listignored, listclean, listunknown = ignored, clean, unknown
966 listignored, listclean, listunknown = ignored, clean, unknown
967
967
968 if working: # we need to scan the working dir
968 if working: # we need to scan the working dir
969 s = self.dirstate.status(match, listignored, listclean, listunknown)
969 s = self.dirstate.status(match, listignored, listclean, listunknown)
970 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
970 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
971 removed.sort()
972 deleted.sort()
973
971
974 # check for any possibly clean files
972 # check for any possibly clean files
975 if parentworking and cmp:
973 if parentworking and cmp:
976 fixup = []
974 fixup = []
977 # do a full compare of any files that might have changed
975 # do a full compare of any files that might have changed
978 for f in cmp:
976 for f in cmp:
979 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
977 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
980 or ctx1[f].cmp(ctx2[f].data())):
978 or ctx1[f].cmp(ctx2[f].data())):
981 modified.append(f)
979 modified.append(f)
982 else:
980 else:
983 fixup.append(f)
981 fixup.append(f)
984
982
985 modified.sort()
986 if listclean:
983 if listclean:
987 clean = util.sort(clean + fixup)
984 clean += fixup
988
985
989 # update dirstate for files that are actually clean
986 # update dirstate for files that are actually clean
990 if fixup:
987 if fixup:
991 wlock = None
988 wlock = None
992 try:
989 try:
993 try:
990 try:
994 wlock = self.wlock(False)
991 wlock = self.wlock(False)
995 for f in fixup:
992 for f in fixup:
996 self.dirstate.normal(f)
993 self.dirstate.normal(f)
997 except lock.LockException:
994 except lock.LockException:
998 pass
995 pass
999 finally:
996 finally:
1000 del wlock
997 del wlock
1001
998
1002 if not parentworking:
999 if not parentworking:
1003 mf1 = mfmatches(ctx1)
1000 mf1 = mfmatches(ctx1)
1004 if working:
1001 if working:
1005 # we are comparing working dir against non-parent
1002 # we are comparing working dir against non-parent
1006 # generate a pseudo-manifest for the working dir
1003 # generate a pseudo-manifest for the working dir
1007 mf2 = mfmatches(self['.'])
1004 mf2 = mfmatches(self['.'])
1008 for f in cmp + modified + added:
1005 for f in cmp + modified + added:
1009 mf2[f] = None
1006 mf2[f] = None
1010 mf2.set(f, ctx2.flags(f))
1007 mf2.set(f, ctx2.flags(f))
1011 for f in removed:
1008 for f in removed:
1012 if f in mf2:
1009 if f in mf2:
1013 del mf2[f]
1010 del mf2[f]
1014 else:
1011 else:
1015 # we are comparing two revisions
1012 # we are comparing two revisions
1016 deleted, unknown, ignored = [], [], []
1013 deleted, unknown, ignored = [], [], []
1017 mf2 = mfmatches(ctx2)
1014 mf2 = mfmatches(ctx2)
1018
1015
1019 modified, added, clean = [], [], []
1016 modified, added, clean = [], [], []
1020 for fn in util.sort(mf2):
1017 for fn in mf2:
1021 if fn in mf1:
1018 if fn in mf1:
1022 if (mf1.flags(fn) != mf2.flags(fn) or
1019 if (mf1.flags(fn) != mf2.flags(fn) or
1023 (mf1[fn] != mf2[fn] and
1020 (mf1[fn] != mf2[fn] and
1024 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1021 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1025 modified.append(fn)
1022 modified.append(fn)
1026 elif listclean:
1023 elif listclean:
1027 clean.append(fn)
1024 clean.append(fn)
1028 del mf1[fn]
1025 del mf1[fn]
1029 else:
1026 else:
1030 added.append(fn)
1027 added.append(fn)
1031 removed = util.sort(mf1.keys())
1028 removed = mf1.keys()
1032
1029
1033 return modified, added, removed, deleted, unknown, ignored, clean
1030 r = modified, added, removed, deleted, unknown, ignored, clean
1031 [l.sort() for l in r]
1032 return r
1034
1033
1035 def add(self, list):
1034 def add(self, list):
1036 wlock = self.wlock()
1035 wlock = self.wlock()
1037 try:
1036 try:
1038 rejected = []
1037 rejected = []
1039 for f in list:
1038 for f in list:
1040 p = self.wjoin(f)
1039 p = self.wjoin(f)
1041 try:
1040 try:
1042 st = os.lstat(p)
1041 st = os.lstat(p)
1043 except:
1042 except:
1044 self.ui.warn(_("%s does not exist!\n") % f)
1043 self.ui.warn(_("%s does not exist!\n") % f)
1045 rejected.append(f)
1044 rejected.append(f)
1046 continue
1045 continue
1047 if st.st_size > 10000000:
1046 if st.st_size > 10000000:
1048 self.ui.warn(_("%s: files over 10MB may cause memory and"
1047 self.ui.warn(_("%s: files over 10MB may cause memory and"
1049 " performance problems\n"
1048 " performance problems\n"
1050 "(use 'hg revert %s' to unadd the file)\n")
1049 "(use 'hg revert %s' to unadd the file)\n")
1051 % (f, f))
1050 % (f, f))
1052 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1051 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1053 self.ui.warn(_("%s not added: only files and symlinks "
1052 self.ui.warn(_("%s not added: only files and symlinks "
1054 "supported currently\n") % f)
1053 "supported currently\n") % f)
1055 rejected.append(p)
1054 rejected.append(p)
1056 elif self.dirstate[f] in 'amn':
1055 elif self.dirstate[f] in 'amn':
1057 self.ui.warn(_("%s already tracked!\n") % f)
1056 self.ui.warn(_("%s already tracked!\n") % f)
1058 elif self.dirstate[f] == 'r':
1057 elif self.dirstate[f] == 'r':
1059 self.dirstate.normallookup(f)
1058 self.dirstate.normallookup(f)
1060 else:
1059 else:
1061 self.dirstate.add(f)
1060 self.dirstate.add(f)
1062 return rejected
1061 return rejected
1063 finally:
1062 finally:
1064 del wlock
1063 del wlock
1065
1064
1066 def forget(self, list):
1065 def forget(self, list):
1067 wlock = self.wlock()
1066 wlock = self.wlock()
1068 try:
1067 try:
1069 for f in list:
1068 for f in list:
1070 if self.dirstate[f] != 'a':
1069 if self.dirstate[f] != 'a':
1071 self.ui.warn(_("%s not added!\n") % f)
1070 self.ui.warn(_("%s not added!\n") % f)
1072 else:
1071 else:
1073 self.dirstate.forget(f)
1072 self.dirstate.forget(f)
1074 finally:
1073 finally:
1075 del wlock
1074 del wlock
1076
1075
1077 def remove(self, list, unlink=False):
1076 def remove(self, list, unlink=False):
1078 wlock = None
1077 wlock = None
1079 try:
1078 try:
1080 if unlink:
1079 if unlink:
1081 for f in list:
1080 for f in list:
1082 try:
1081 try:
1083 util.unlink(self.wjoin(f))
1082 util.unlink(self.wjoin(f))
1084 except OSError, inst:
1083 except OSError, inst:
1085 if inst.errno != errno.ENOENT:
1084 if inst.errno != errno.ENOENT:
1086 raise
1085 raise
1087 wlock = self.wlock()
1086 wlock = self.wlock()
1088 for f in list:
1087 for f in list:
1089 if unlink and os.path.exists(self.wjoin(f)):
1088 if unlink and os.path.exists(self.wjoin(f)):
1090 self.ui.warn(_("%s still exists!\n") % f)
1089 self.ui.warn(_("%s still exists!\n") % f)
1091 elif self.dirstate[f] == 'a':
1090 elif self.dirstate[f] == 'a':
1092 self.dirstate.forget(f)
1091 self.dirstate.forget(f)
1093 elif f not in self.dirstate:
1092 elif f not in self.dirstate:
1094 self.ui.warn(_("%s not tracked!\n") % f)
1093 self.ui.warn(_("%s not tracked!\n") % f)
1095 else:
1094 else:
1096 self.dirstate.remove(f)
1095 self.dirstate.remove(f)
1097 finally:
1096 finally:
1098 del wlock
1097 del wlock
1099
1098
1100 def undelete(self, list):
1099 def undelete(self, list):
1101 wlock = None
1100 wlock = None
1102 try:
1101 try:
1103 manifests = [self.manifest.read(self.changelog.read(p)[0])
1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1104 for p in self.dirstate.parents() if p != nullid]
1103 for p in self.dirstate.parents() if p != nullid]
1105 wlock = self.wlock()
1104 wlock = self.wlock()
1106 for f in list:
1105 for f in list:
1107 if self.dirstate[f] != 'r':
1106 if self.dirstate[f] != 'r':
1108 self.ui.warn("%s not removed!\n" % f)
1107 self.ui.warn("%s not removed!\n" % f)
1109 else:
1108 else:
1110 m = f in manifests[0] and manifests[0] or manifests[1]
1109 m = f in manifests[0] and manifests[0] or manifests[1]
1111 t = self.file(f).read(m[f])
1110 t = self.file(f).read(m[f])
1112 self.wwrite(f, t, m.flags(f))
1111 self.wwrite(f, t, m.flags(f))
1113 self.dirstate.normal(f)
1112 self.dirstate.normal(f)
1114 finally:
1113 finally:
1115 del wlock
1114 del wlock
1116
1115
1117 def copy(self, source, dest):
1116 def copy(self, source, dest):
1118 wlock = None
1117 wlock = None
1119 try:
1118 try:
1120 p = self.wjoin(dest)
1119 p = self.wjoin(dest)
1121 if not (os.path.exists(p) or os.path.islink(p)):
1120 if not (os.path.exists(p) or os.path.islink(p)):
1122 self.ui.warn(_("%s does not exist!\n") % dest)
1121 self.ui.warn(_("%s does not exist!\n") % dest)
1123 elif not (os.path.isfile(p) or os.path.islink(p)):
1122 elif not (os.path.isfile(p) or os.path.islink(p)):
1124 self.ui.warn(_("copy failed: %s is not a file or a "
1123 self.ui.warn(_("copy failed: %s is not a file or a "
1125 "symbolic link\n") % dest)
1124 "symbolic link\n") % dest)
1126 else:
1125 else:
1127 wlock = self.wlock()
1126 wlock = self.wlock()
1128 if dest not in self.dirstate:
1127 if dest not in self.dirstate:
1129 self.dirstate.add(dest)
1128 self.dirstate.add(dest)
1130 self.dirstate.copy(source, dest)
1129 self.dirstate.copy(source, dest)
1131 finally:
1130 finally:
1132 del wlock
1131 del wlock
1133
1132
1134 def heads(self, start=None):
1133 def heads(self, start=None):
1135 heads = self.changelog.heads(start)
1134 heads = self.changelog.heads(start)
1136 # sort the output in rev descending order
1135 # sort the output in rev descending order
1137 heads = [(-self.changelog.rev(h), h) for h in heads]
1136 heads = [(-self.changelog.rev(h), h) for h in heads]
1138 return [n for (r, n) in util.sort(heads)]
1137 return [n for (r, n) in util.sort(heads)]
1139
1138
1140 def branchheads(self, branch=None, start=None):
1139 def branchheads(self, branch=None, start=None):
1141 if branch is None:
1140 if branch is None:
1142 branch = self[None].branch()
1141 branch = self[None].branch()
1143 branches = self.branchtags()
1142 branches = self.branchtags()
1144 if branch not in branches:
1143 if branch not in branches:
1145 return []
1144 return []
1146 # The basic algorithm is this:
1145 # The basic algorithm is this:
1147 #
1146 #
1148 # Start from the branch tip since there are no later revisions that can
1147 # Start from the branch tip since there are no later revisions that can
1149 # possibly be in this branch, and the tip is a guaranteed head.
1148 # possibly be in this branch, and the tip is a guaranteed head.
1150 #
1149 #
1151 # Remember the tip's parents as the first ancestors, since these by
1150 # Remember the tip's parents as the first ancestors, since these by
1152 # definition are not heads.
1151 # definition are not heads.
1153 #
1152 #
1154 # Step backwards from the brach tip through all the revisions. We are
1153 # Step backwards from the brach tip through all the revisions. We are
1155 # guaranteed by the rules of Mercurial that we will now be visiting the
1154 # guaranteed by the rules of Mercurial that we will now be visiting the
1156 # nodes in reverse topological order (children before parents).
1155 # nodes in reverse topological order (children before parents).
1157 #
1156 #
1158 # If a revision is one of the ancestors of a head then we can toss it
1157 # If a revision is one of the ancestors of a head then we can toss it
1159 # out of the ancestors set (we've already found it and won't be
1158 # out of the ancestors set (we've already found it and won't be
1160 # visiting it again) and put its parents in the ancestors set.
1159 # visiting it again) and put its parents in the ancestors set.
1161 #
1160 #
1162 # Otherwise, if a revision is in the branch it's another head, since it
1161 # Otherwise, if a revision is in the branch it's another head, since it
1163 # wasn't in the ancestor list of an existing head. So add it to the
1162 # wasn't in the ancestor list of an existing head. So add it to the
1164 # head list, and add its parents to the ancestor list.
1163 # head list, and add its parents to the ancestor list.
1165 #
1164 #
1166 # If it is not in the branch ignore it.
1165 # If it is not in the branch ignore it.
1167 #
1166 #
1168 # Once we have a list of heads, use nodesbetween to filter out all the
1167 # Once we have a list of heads, use nodesbetween to filter out all the
1169 # heads that cannot be reached from startrev. There may be a more
1168 # heads that cannot be reached from startrev. There may be a more
1170 # efficient way to do this as part of the previous algorithm.
1169 # efficient way to do this as part of the previous algorithm.
1171
1170
1172 set = util.set
1171 set = util.set
1173 heads = [self.changelog.rev(branches[branch])]
1172 heads = [self.changelog.rev(branches[branch])]
1174 # Don't care if ancestors contains nullrev or not.
1173 # Don't care if ancestors contains nullrev or not.
1175 ancestors = set(self.changelog.parentrevs(heads[0]))
1174 ancestors = set(self.changelog.parentrevs(heads[0]))
1176 for rev in xrange(heads[0] - 1, nullrev, -1):
1175 for rev in xrange(heads[0] - 1, nullrev, -1):
1177 if rev in ancestors:
1176 if rev in ancestors:
1178 ancestors.update(self.changelog.parentrevs(rev))
1177 ancestors.update(self.changelog.parentrevs(rev))
1179 ancestors.remove(rev)
1178 ancestors.remove(rev)
1180 elif self[rev].branch() == branch:
1179 elif self[rev].branch() == branch:
1181 heads.append(rev)
1180 heads.append(rev)
1182 ancestors.update(self.changelog.parentrevs(rev))
1181 ancestors.update(self.changelog.parentrevs(rev))
1183 heads = [self.changelog.node(rev) for rev in heads]
1182 heads = [self.changelog.node(rev) for rev in heads]
1184 if start is not None:
1183 if start is not None:
1185 heads = self.changelog.nodesbetween([start], heads)[2]
1184 heads = self.changelog.nodesbetween([start], heads)[2]
1186 return heads
1185 return heads
1187
1186
1188 def branches(self, nodes):
1187 def branches(self, nodes):
1189 if not nodes:
1188 if not nodes:
1190 nodes = [self.changelog.tip()]
1189 nodes = [self.changelog.tip()]
1191 b = []
1190 b = []
1192 for n in nodes:
1191 for n in nodes:
1193 t = n
1192 t = n
1194 while 1:
1193 while 1:
1195 p = self.changelog.parents(n)
1194 p = self.changelog.parents(n)
1196 if p[1] != nullid or p[0] == nullid:
1195 if p[1] != nullid or p[0] == nullid:
1197 b.append((t, n, p[0], p[1]))
1196 b.append((t, n, p[0], p[1]))
1198 break
1197 break
1199 n = p[0]
1198 n = p[0]
1200 return b
1199 return b
1201
1200
1202 def between(self, pairs):
1201 def between(self, pairs):
1203 r = []
1202 r = []
1204
1203
1205 for top, bottom in pairs:
1204 for top, bottom in pairs:
1206 n, l, i = top, [], 0
1205 n, l, i = top, [], 0
1207 f = 1
1206 f = 1
1208
1207
1209 while n != bottom:
1208 while n != bottom:
1210 p = self.changelog.parents(n)[0]
1209 p = self.changelog.parents(n)[0]
1211 if i == f:
1210 if i == f:
1212 l.append(n)
1211 l.append(n)
1213 f = f * 2
1212 f = f * 2
1214 n = p
1213 n = p
1215 i += 1
1214 i += 1
1216
1215
1217 r.append(l)
1216 r.append(l)
1218
1217
1219 return r
1218 return r
1220
1219
1221 def findincoming(self, remote, base=None, heads=None, force=False):
1220 def findincoming(self, remote, base=None, heads=None, force=False):
1222 """Return list of roots of the subsets of missing nodes from remote
1221 """Return list of roots of the subsets of missing nodes from remote
1223
1222
1224 If base dict is specified, assume that these nodes and their parents
1223 If base dict is specified, assume that these nodes and their parents
1225 exist on the remote side and that no child of a node of base exists
1224 exist on the remote side and that no child of a node of base exists
1226 in both remote and self.
1225 in both remote and self.
1227 Furthermore base will be updated to include the nodes that exists
1226 Furthermore base will be updated to include the nodes that exists
1228 in self and remote but no children exists in self and remote.
1227 in self and remote but no children exists in self and remote.
1229 If a list of heads is specified, return only nodes which are heads
1228 If a list of heads is specified, return only nodes which are heads
1230 or ancestors of these heads.
1229 or ancestors of these heads.
1231
1230
1232 All the ancestors of base are in self and in remote.
1231 All the ancestors of base are in self and in remote.
1233 All the descendants of the list returned are missing in self.
1232 All the descendants of the list returned are missing in self.
1234 (and so we know that the rest of the nodes are missing in remote, see
1233 (and so we know that the rest of the nodes are missing in remote, see
1235 outgoing)
1234 outgoing)
1236 """
1235 """
1237 m = self.changelog.nodemap
1236 m = self.changelog.nodemap
1238 search = []
1237 search = []
1239 fetch = {}
1238 fetch = {}
1240 seen = {}
1239 seen = {}
1241 seenbranch = {}
1240 seenbranch = {}
1242 if base == None:
1241 if base == None:
1243 base = {}
1242 base = {}
1244
1243
1245 if not heads:
1244 if not heads:
1246 heads = remote.heads()
1245 heads = remote.heads()
1247
1246
1248 if self.changelog.tip() == nullid:
1247 if self.changelog.tip() == nullid:
1249 base[nullid] = 1
1248 base[nullid] = 1
1250 if heads != [nullid]:
1249 if heads != [nullid]:
1251 return [nullid]
1250 return [nullid]
1252 return []
1251 return []
1253
1252
1254 # assume we're closer to the tip than the root
1253 # assume we're closer to the tip than the root
1255 # and start by examining the heads
1254 # and start by examining the heads
1256 self.ui.status(_("searching for changes\n"))
1255 self.ui.status(_("searching for changes\n"))
1257
1256
1258 unknown = []
1257 unknown = []
1259 for h in heads:
1258 for h in heads:
1260 if h not in m:
1259 if h not in m:
1261 unknown.append(h)
1260 unknown.append(h)
1262 else:
1261 else:
1263 base[h] = 1
1262 base[h] = 1
1264
1263
1265 if not unknown:
1264 if not unknown:
1266 return []
1265 return []
1267
1266
1268 req = dict.fromkeys(unknown)
1267 req = dict.fromkeys(unknown)
1269 reqcnt = 0
1268 reqcnt = 0
1270
1269
1271 # search through remote branches
1270 # search through remote branches
1272 # a 'branch' here is a linear segment of history, with four parts:
1271 # a 'branch' here is a linear segment of history, with four parts:
1273 # head, root, first parent, second parent
1272 # head, root, first parent, second parent
1274 # (a branch always has two parents (or none) by definition)
1273 # (a branch always has two parents (or none) by definition)
1275 unknown = remote.branches(unknown)
1274 unknown = remote.branches(unknown)
1276 while unknown:
1275 while unknown:
1277 r = []
1276 r = []
1278 while unknown:
1277 while unknown:
1279 n = unknown.pop(0)
1278 n = unknown.pop(0)
1280 if n[0] in seen:
1279 if n[0] in seen:
1281 continue
1280 continue
1282
1281
1283 self.ui.debug(_("examining %s:%s\n")
1282 self.ui.debug(_("examining %s:%s\n")
1284 % (short(n[0]), short(n[1])))
1283 % (short(n[0]), short(n[1])))
1285 if n[0] == nullid: # found the end of the branch
1284 if n[0] == nullid: # found the end of the branch
1286 pass
1285 pass
1287 elif n in seenbranch:
1286 elif n in seenbranch:
1288 self.ui.debug(_("branch already found\n"))
1287 self.ui.debug(_("branch already found\n"))
1289 continue
1288 continue
1290 elif n[1] and n[1] in m: # do we know the base?
1289 elif n[1] and n[1] in m: # do we know the base?
1291 self.ui.debug(_("found incomplete branch %s:%s\n")
1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1292 % (short(n[0]), short(n[1])))
1291 % (short(n[0]), short(n[1])))
1293 search.append(n) # schedule branch range for scanning
1292 search.append(n) # schedule branch range for scanning
1294 seenbranch[n] = 1
1293 seenbranch[n] = 1
1295 else:
1294 else:
1296 if n[1] not in seen and n[1] not in fetch:
1295 if n[1] not in seen and n[1] not in fetch:
1297 if n[2] in m and n[3] in m:
1296 if n[2] in m and n[3] in m:
1298 self.ui.debug(_("found new changeset %s\n") %
1297 self.ui.debug(_("found new changeset %s\n") %
1299 short(n[1]))
1298 short(n[1]))
1300 fetch[n[1]] = 1 # earliest unknown
1299 fetch[n[1]] = 1 # earliest unknown
1301 for p in n[2:4]:
1300 for p in n[2:4]:
1302 if p in m:
1301 if p in m:
1303 base[p] = 1 # latest known
1302 base[p] = 1 # latest known
1304
1303
1305 for p in n[2:4]:
1304 for p in n[2:4]:
1306 if p not in req and p not in m:
1305 if p not in req and p not in m:
1307 r.append(p)
1306 r.append(p)
1308 req[p] = 1
1307 req[p] = 1
1309 seen[n[0]] = 1
1308 seen[n[0]] = 1
1310
1309
1311 if r:
1310 if r:
1312 reqcnt += 1
1311 reqcnt += 1
1313 self.ui.debug(_("request %d: %s\n") %
1312 self.ui.debug(_("request %d: %s\n") %
1314 (reqcnt, " ".join(map(short, r))))
1313 (reqcnt, " ".join(map(short, r))))
1315 for p in xrange(0, len(r), 10):
1314 for p in xrange(0, len(r), 10):
1316 for b in remote.branches(r[p:p+10]):
1315 for b in remote.branches(r[p:p+10]):
1317 self.ui.debug(_("received %s:%s\n") %
1316 self.ui.debug(_("received %s:%s\n") %
1318 (short(b[0]), short(b[1])))
1317 (short(b[0]), short(b[1])))
1319 unknown.append(b)
1318 unknown.append(b)
1320
1319
1321 # do binary search on the branches we found
1320 # do binary search on the branches we found
1322 while search:
1321 while search:
1323 n = search.pop(0)
1322 n = search.pop(0)
1324 reqcnt += 1
1323 reqcnt += 1
1325 l = remote.between([(n[0], n[1])])[0]
1324 l = remote.between([(n[0], n[1])])[0]
1326 l.append(n[1])
1325 l.append(n[1])
1327 p = n[0]
1326 p = n[0]
1328 f = 1
1327 f = 1
1329 for i in l:
1328 for i in l:
1330 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1331 if i in m:
1330 if i in m:
1332 if f <= 2:
1331 if f <= 2:
1333 self.ui.debug(_("found new branch changeset %s\n") %
1332 self.ui.debug(_("found new branch changeset %s\n") %
1334 short(p))
1333 short(p))
1335 fetch[p] = 1
1334 fetch[p] = 1
1336 base[i] = 1
1335 base[i] = 1
1337 else:
1336 else:
1338 self.ui.debug(_("narrowed branch search to %s:%s\n")
1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1339 % (short(p), short(i)))
1338 % (short(p), short(i)))
1340 search.append((p, i))
1339 search.append((p, i))
1341 break
1340 break
1342 p, f = i, f * 2
1341 p, f = i, f * 2
1343
1342
1344 # sanity check our fetch list
1343 # sanity check our fetch list
1345 for f in fetch.keys():
1344 for f in fetch.keys():
1346 if f in m:
1345 if f in m:
1347 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1346 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1348
1347
1349 if base.keys() == [nullid]:
1348 if base.keys() == [nullid]:
1350 if force:
1349 if force:
1351 self.ui.warn(_("warning: repository is unrelated\n"))
1350 self.ui.warn(_("warning: repository is unrelated\n"))
1352 else:
1351 else:
1353 raise util.Abort(_("repository is unrelated"))
1352 raise util.Abort(_("repository is unrelated"))
1354
1353
1355 self.ui.debug(_("found new changesets starting at ") +
1354 self.ui.debug(_("found new changesets starting at ") +
1356 " ".join([short(f) for f in fetch]) + "\n")
1355 " ".join([short(f) for f in fetch]) + "\n")
1357
1356
1358 self.ui.debug(_("%d total queries\n") % reqcnt)
1357 self.ui.debug(_("%d total queries\n") % reqcnt)
1359
1358
1360 return fetch.keys()
1359 return fetch.keys()
1361
1360
1362 def findoutgoing(self, remote, base=None, heads=None, force=False):
1361 def findoutgoing(self, remote, base=None, heads=None, force=False):
1363 """Return list of nodes that are roots of subsets not in remote
1362 """Return list of nodes that are roots of subsets not in remote
1364
1363
1365 If base dict is specified, assume that these nodes and their parents
1364 If base dict is specified, assume that these nodes and their parents
1366 exist on the remote side.
1365 exist on the remote side.
1367 If a list of heads is specified, return only nodes which are heads
1366 If a list of heads is specified, return only nodes which are heads
1368 or ancestors of these heads, and return a second element which
1367 or ancestors of these heads, and return a second element which
1369 contains all remote heads which get new children.
1368 contains all remote heads which get new children.
1370 """
1369 """
1371 if base == None:
1370 if base == None:
1372 base = {}
1371 base = {}
1373 self.findincoming(remote, base, heads, force=force)
1372 self.findincoming(remote, base, heads, force=force)
1374
1373
1375 self.ui.debug(_("common changesets up to ")
1374 self.ui.debug(_("common changesets up to ")
1376 + " ".join(map(short, base.keys())) + "\n")
1375 + " ".join(map(short, base.keys())) + "\n")
1377
1376
1378 remain = dict.fromkeys(self.changelog.nodemap)
1377 remain = dict.fromkeys(self.changelog.nodemap)
1379
1378
1380 # prune everything remote has from the tree
1379 # prune everything remote has from the tree
1381 del remain[nullid]
1380 del remain[nullid]
1382 remove = base.keys()
1381 remove = base.keys()
1383 while remove:
1382 while remove:
1384 n = remove.pop(0)
1383 n = remove.pop(0)
1385 if n in remain:
1384 if n in remain:
1386 del remain[n]
1385 del remain[n]
1387 for p in self.changelog.parents(n):
1386 for p in self.changelog.parents(n):
1388 remove.append(p)
1387 remove.append(p)
1389
1388
1390 # find every node whose parents have been pruned
1389 # find every node whose parents have been pruned
1391 subset = []
1390 subset = []
1392 # find every remote head that will get new children
1391 # find every remote head that will get new children
1393 updated_heads = {}
1392 updated_heads = {}
1394 for n in remain:
1393 for n in remain:
1395 p1, p2 = self.changelog.parents(n)
1394 p1, p2 = self.changelog.parents(n)
1396 if p1 not in remain and p2 not in remain:
1395 if p1 not in remain and p2 not in remain:
1397 subset.append(n)
1396 subset.append(n)
1398 if heads:
1397 if heads:
1399 if p1 in heads:
1398 if p1 in heads:
1400 updated_heads[p1] = True
1399 updated_heads[p1] = True
1401 if p2 in heads:
1400 if p2 in heads:
1402 updated_heads[p2] = True
1401 updated_heads[p2] = True
1403
1402
1404 # this is the set of all roots we have to push
1403 # this is the set of all roots we have to push
1405 if heads:
1404 if heads:
1406 return subset, updated_heads.keys()
1405 return subset, updated_heads.keys()
1407 else:
1406 else:
1408 return subset
1407 return subset
1409
1408
1410 def pull(self, remote, heads=None, force=False):
1409 def pull(self, remote, heads=None, force=False):
1411 lock = self.lock()
1410 lock = self.lock()
1412 try:
1411 try:
1413 fetch = self.findincoming(remote, heads=heads, force=force)
1412 fetch = self.findincoming(remote, heads=heads, force=force)
1414 if fetch == [nullid]:
1413 if fetch == [nullid]:
1415 self.ui.status(_("requesting all changes\n"))
1414 self.ui.status(_("requesting all changes\n"))
1416
1415
1417 if not fetch:
1416 if not fetch:
1418 self.ui.status(_("no changes found\n"))
1417 self.ui.status(_("no changes found\n"))
1419 return 0
1418 return 0
1420
1419
1421 if heads is None:
1420 if heads is None:
1422 cg = remote.changegroup(fetch, 'pull')
1421 cg = remote.changegroup(fetch, 'pull')
1423 else:
1422 else:
1424 if 'changegroupsubset' not in remote.capabilities:
1423 if 'changegroupsubset' not in remote.capabilities:
1425 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1424 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1426 cg = remote.changegroupsubset(fetch, heads, 'pull')
1425 cg = remote.changegroupsubset(fetch, heads, 'pull')
1427 return self.addchangegroup(cg, 'pull', remote.url())
1426 return self.addchangegroup(cg, 'pull', remote.url())
1428 finally:
1427 finally:
1429 del lock
1428 del lock
1430
1429
1431 def push(self, remote, force=False, revs=None):
1430 def push(self, remote, force=False, revs=None):
1432 # there are two ways to push to remote repo:
1431 # there are two ways to push to remote repo:
1433 #
1432 #
1434 # addchangegroup assumes local user can lock remote
1433 # addchangegroup assumes local user can lock remote
1435 # repo (local filesystem, old ssh servers).
1434 # repo (local filesystem, old ssh servers).
1436 #
1435 #
1437 # unbundle assumes local user cannot lock remote repo (new ssh
1436 # unbundle assumes local user cannot lock remote repo (new ssh
1438 # servers, http servers).
1437 # servers, http servers).
1439
1438
1440 if remote.capable('unbundle'):
1439 if remote.capable('unbundle'):
1441 return self.push_unbundle(remote, force, revs)
1440 return self.push_unbundle(remote, force, revs)
1442 return self.push_addchangegroup(remote, force, revs)
1441 return self.push_addchangegroup(remote, force, revs)
1443
1442
    def prepush(self, remote, force, revs):
        """Analyze what would be pushed to remote and build the changegroup.

        Returns a 2-tuple: (changegroup, remote_heads) on success,
        (None, 1) when there is nothing to push, and (None, 0) when the
        push is refused because it would create new remote heads.

        remote - the peer repository to push to
        force  - allow creating new remote heads / ignore unsynced changes
        revs   - optional list of local nodes limiting what is pushed
        """
        base = {}
        remote_heads = remote.heads()
        # inc: the nodes remote has that we don't (findincoming returns
        # fetch.keys()); used only as a boolean "remote is unsynced" below.
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repository is empty: cannot create "new" heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we have more heads than remote
                warn = 1
            else:
                # simulate the post-push remote head set and compare sizes
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # r is not ancestral to any outgoing head, so
                            # it survives as a head after the push
                            newheads.append(r)
                    else:
                        # r is unknown locally; assume it stays a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            # forced push with unsynced incoming changes: warn but proceed
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # no revs given: send everything outgoing
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1499
1498
1500 def push_addchangegroup(self, remote, force, revs):
1499 def push_addchangegroup(self, remote, force, revs):
1501 lock = remote.lock()
1500 lock = remote.lock()
1502 try:
1501 try:
1503 ret = self.prepush(remote, force, revs)
1502 ret = self.prepush(remote, force, revs)
1504 if ret[0] is not None:
1503 if ret[0] is not None:
1505 cg, remote_heads = ret
1504 cg, remote_heads = ret
1506 return remote.addchangegroup(cg, 'push', self.url())
1505 return remote.addchangegroup(cg, 'push', self.url())
1507 return ret[1]
1506 return ret[1]
1508 finally:
1507 finally:
1509 del lock
1508 del lock
1510
1509
1511 def push_unbundle(self, remote, force, revs):
1510 def push_unbundle(self, remote, force, revs):
1512 # local repo finds heads on server, finds out what revs it
1511 # local repo finds heads on server, finds out what revs it
1513 # must push. once revs transferred, if server finds it has
1512 # must push. once revs transferred, if server finds it has
1514 # different heads (someone else won commit/push race), server
1513 # different heads (someone else won commit/push race), server
1515 # aborts.
1514 # aborts.
1516
1515
1517 ret = self.prepush(remote, force, revs)
1516 ret = self.prepush(remote, force, revs)
1518 if ret[0] is not None:
1517 if ret[0] is not None:
1519 cg, remote_heads = ret
1518 cg, remote_heads = ret
1520 if force: remote_heads = ['force']
1519 if force: remote_heads = ['force']
1521 return remote.unbundle(cg, remote_heads, 'push')
1520 return remote.unbundle(cg, remote_heads, 'push')
1522 return ret[1]
1521 return ret[1]
1523
1522
1524 def changegroupinfo(self, nodes, source):
1523 def changegroupinfo(self, nodes, source):
1525 if self.ui.verbose or source == 'bundle':
1524 if self.ui.verbose or source == 'bundle':
1526 self.ui.status(_("%d changesets found\n") % len(nodes))
1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1527 if self.ui.debugflag:
1526 if self.ui.debugflag:
1528 self.ui.debug(_("List of changesets:\n"))
1527 self.ui.debug(_("List of changesets:\n"))
1529 for node in nodes:
1528 for node in nodes:
1530 self.ui.debug("%s\n" % hex(node))
1529 self.ui.debug("%s\n" % hex(node))
1531
1530
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the generated group.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        # key 1 is the manifest, already handled above
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1826
1825
1827 def changegroup(self, basenodes, source):
1826 def changegroup(self, basenodes, source):
1828 """Generate a changegroup of all nodes that we have that a recipient
1827 """Generate a changegroup of all nodes that we have that a recipient
1829 doesn't.
1828 doesn't.
1830
1829
1831 This is much easier than the previous function as we can assume that
1830 This is much easier than the previous function as we can assume that
1832 the recipient has any changenode we aren't sending them."""
1831 the recipient has any changenode we aren't sending them."""
1833
1832
1834 self.hook('preoutgoing', throw=True, source=source)
1833 self.hook('preoutgoing', throw=True, source=source)
1835
1834
1836 cl = self.changelog
1835 cl = self.changelog
1837 nodes = cl.nodesbetween(basenodes, None)[0]
1836 nodes = cl.nodesbetween(basenodes, None)[0]
1838 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1837 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1839 self.changegroupinfo(nodes, source)
1838 self.changegroupinfo(nodes, source)
1840
1839
1841 def identity(x):
1840 def identity(x):
1842 return x
1841 return x
1843
1842
1844 def gennodelst(log):
1843 def gennodelst(log):
1845 for r in log:
1844 for r in log:
1846 n = log.node(r)
1845 n = log.node(r)
1847 if log.linkrev(n) in revset:
1846 if log.linkrev(n) in revset:
1848 yield n
1847 yield n
1849
1848
1850 def changed_file_collector(changedfileset):
1849 def changed_file_collector(changedfileset):
1851 def collect_changed_files(clnode):
1850 def collect_changed_files(clnode):
1852 c = cl.read(clnode)
1851 c = cl.read(clnode)
1853 for fname in c[3]:
1852 for fname in c[3]:
1854 changedfileset[fname] = 1
1853 changedfileset[fname] = 1
1855 return collect_changed_files
1854 return collect_changed_files
1856
1855
1857 def lookuprevlink_func(revlog):
1856 def lookuprevlink_func(revlog):
1858 def lookuprevlink(n):
1857 def lookuprevlink(n):
1859 return cl.node(revlog.linkrev(n))
1858 return cl.node(revlog.linkrev(n))
1860 return lookuprevlink
1859 return lookuprevlink
1861
1860
1862 def gengroup():
1861 def gengroup():
1863 # construct a list of all changed files
1862 # construct a list of all changed files
1864 changedfiles = {}
1863 changedfiles = {}
1865
1864
1866 for chnk in cl.group(nodes, identity,
1865 for chnk in cl.group(nodes, identity,
1867 changed_file_collector(changedfiles)):
1866 changed_file_collector(changedfiles)):
1868 yield chnk
1867 yield chnk
1869
1868
1870 mnfst = self.manifest
1869 mnfst = self.manifest
1871 nodeiter = gennodelst(mnfst)
1870 nodeiter = gennodelst(mnfst)
1872 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1871 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1873 yield chnk
1872 yield chnk
1874
1873
1875 for fname in util.sort(changedfiles):
1874 for fname in util.sort(changedfiles):
1876 filerevlog = self.file(fname)
1875 filerevlog = self.file(fname)
1877 if not len(filerevlog):
1876 if not len(filerevlog):
1878 raise util.Abort(_("empty or missing revlog for %s") % fname)
1877 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 nodeiter = gennodelst(filerevlog)
1878 nodeiter = gennodelst(filerevlog)
1880 nodeiter = list(nodeiter)
1879 nodeiter = list(nodeiter)
1881 if nodeiter:
1880 if nodeiter:
1882 yield changegroup.chunkheader(len(fname))
1881 yield changegroup.chunkheader(len(fname))
1883 yield fname
1882 yield fname
1884 lookup = lookuprevlink_func(filerevlog)
1883 lookup = lookuprevlink_func(filerevlog)
1885 for chnk in filerevlog.group(nodeiter, lookup):
1884 for chnk in filerevlog.group(nodeiter, lookup):
1886 yield chnk
1885 yield chnk
1887
1886
1888 yield changegroup.closechunk()
1887 yield changegroup.closechunk()
1889
1888
1890 if nodes:
1889 if nodes:
1891 self.hook('outgoing', node=hex(nodes[0]), source=source)
1890 self.hook('outgoing', node=hex(nodes[0]), source=source)
1892
1891
1893 return util.chunkbuffer(gengroup())
1892 return util.chunkbuffer(gengroup())
1894
1893
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        Reads a changegroup stream from *source* and applies it inside a
        single transaction: changelog first, then manifests, then one
        revlog group per file.  Hooks fired here: 'prechangegroup'
        (before), 'pretxnchangegroup' (inside the transaction),
        'changegroup' and 'incoming' (after commit).

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # per-changeset callback for cl.addgroup: logs progress and
            # returns the rev number the incoming changeset will receive
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            # map a changelog node to its revision number, used as the
            # linkrev for incoming manifest/file revisions
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # NOTE(review): weak proxy presumably avoids a strong reference
            # so `del tr` below lets the transaction's destructor abort an
            # uncommitted transaction -- confirm against transaction impl
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1  # tip rev before the pull
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1  # tip rev after the pull
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # empty chunk marks the end of the file groups
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # drop our reference whether or not tr.close() ran; an
            # unclosed transaction is expected to abort on destruction
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1997
1996
1998
1997
1999 def stream_in(self, remote):
1998 def stream_in(self, remote):
2000 fp = remote.stream_out()
1999 fp = remote.stream_out()
2001 l = fp.readline()
2000 l = fp.readline()
2002 try:
2001 try:
2003 resp = int(l)
2002 resp = int(l)
2004 except ValueError:
2003 except ValueError:
2005 raise util.UnexpectedOutput(
2004 raise util.UnexpectedOutput(
2006 _('Unexpected response from remote server:'), l)
2005 _('Unexpected response from remote server:'), l)
2007 if resp == 1:
2006 if resp == 1:
2008 raise util.Abort(_('operation forbidden by server'))
2007 raise util.Abort(_('operation forbidden by server'))
2009 elif resp == 2:
2008 elif resp == 2:
2010 raise util.Abort(_('locking the remote repository failed'))
2009 raise util.Abort(_('locking the remote repository failed'))
2011 elif resp != 0:
2010 elif resp != 0:
2012 raise util.Abort(_('the server sent an unknown error code'))
2011 raise util.Abort(_('the server sent an unknown error code'))
2013 self.ui.status(_('streaming all changes\n'))
2012 self.ui.status(_('streaming all changes\n'))
2014 l = fp.readline()
2013 l = fp.readline()
2015 try:
2014 try:
2016 total_files, total_bytes = map(int, l.split(' ', 1))
2015 total_files, total_bytes = map(int, l.split(' ', 1))
2017 except (ValueError, TypeError):
2016 except (ValueError, TypeError):
2018 raise util.UnexpectedOutput(
2017 raise util.UnexpectedOutput(
2019 _('Unexpected response from remote server:'), l)
2018 _('Unexpected response from remote server:'), l)
2020 self.ui.status(_('%d files to transfer, %s of data\n') %
2019 self.ui.status(_('%d files to transfer, %s of data\n') %
2021 (total_files, util.bytecount(total_bytes)))
2020 (total_files, util.bytecount(total_bytes)))
2022 start = time.time()
2021 start = time.time()
2023 for i in xrange(total_files):
2022 for i in xrange(total_files):
2024 # XXX doesn't support '\n' or '\r' in filenames
2023 # XXX doesn't support '\n' or '\r' in filenames
2025 l = fp.readline()
2024 l = fp.readline()
2026 try:
2025 try:
2027 name, size = l.split('\0', 1)
2026 name, size = l.split('\0', 1)
2028 size = int(size)
2027 size = int(size)
2029 except ValueError, TypeError:
2028 except ValueError, TypeError:
2030 raise util.UnexpectedOutput(
2029 raise util.UnexpectedOutput(
2031 _('Unexpected response from remote server:'), l)
2030 _('Unexpected response from remote server:'), l)
2032 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2031 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2033 ofp = self.sopener(name, 'w')
2032 ofp = self.sopener(name, 'w')
2034 for chunk in util.filechunkiter(fp, limit=size):
2033 for chunk in util.filechunkiter(fp, limit=size):
2035 ofp.write(chunk)
2034 ofp.write(chunk)
2036 ofp.close()
2035 ofp.close()
2037 elapsed = time.time() - start
2036 elapsed = time.time() - start
2038 if elapsed <= 0:
2037 if elapsed <= 0:
2039 elapsed = 0.001
2038 elapsed = 0.001
2040 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2039 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2041 (util.bytecount(total_bytes), elapsed,
2040 (util.bytecount(total_bytes), elapsed,
2042 util.bytecount(total_bytes / elapsed)))
2041 util.bytecount(total_bytes / elapsed)))
2043 self.invalidate()
2042 self.invalidate()
2044 return len(self.heads()) + 1
2043 return len(self.heads()) + 1
2045
2044
2046 def clone(self, remote, heads=[], stream=False):
2045 def clone(self, remote, heads=[], stream=False):
2047 '''clone remote repository.
2046 '''clone remote repository.
2048
2047
2049 keyword arguments:
2048 keyword arguments:
2050 heads: list of revs to clone (forces use of pull)
2049 heads: list of revs to clone (forces use of pull)
2051 stream: use streaming clone if possible'''
2050 stream: use streaming clone if possible'''
2052
2051
2053 # now, all clients that can request uncompressed clones can
2052 # now, all clients that can request uncompressed clones can
2054 # read repo formats supported by all servers that can serve
2053 # read repo formats supported by all servers that can serve
2055 # them.
2054 # them.
2056
2055
2057 # if revlog format changes, client will have to check version
2056 # if revlog format changes, client will have to check version
2058 # and format flags on "stream" capability, and use
2057 # and format flags on "stream" capability, and use
2059 # uncompressed only if compatible.
2058 # uncompressed only if compatible.
2060
2059
2061 if stream and not heads and remote.capable('stream'):
2060 if stream and not heads and remote.capable('stream'):
2062 return self.stream_in(remote)
2061 return self.stream_in(remote)
2063 return self.pull(remote, heads)
2062 return self.pull(remote, heads)
2064
2063
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are snapshotted as tuples so the returned closure does not
    keep the caller's mutable structures alive (avoiding reference
    cycles that would delay destructors).
    """
    renamefiles = [tuple(pair) for pair in files]
    def a():
        for source_path, dest_path in renamefiles:
            util.rename(source_path, dest_path)
    return a
2072
2071
def instance(ui, path, create):
    """Open (or create) a localrepository at *path*, after stripping a
    leading 'file:' scheme prefix."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2075
2074
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,20 +1,20
1 adding empty-file
1 adding empty-file
2 adding large-file
2 adding large-file
3 adding another-file
3 adding another-file
4 removing empty-file
4 removing empty-file
5 removing large-file
5 removing large-file
6 recording removal of large-file as rename to another-file (99% similar)
6 recording removal of large-file as rename to another-file (99% similar)
7 % comparing two empty files caused ZeroDivisionError in the past
7 % comparing two empty files caused ZeroDivisionError in the past
8 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
8 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
9 adding another-empty-file
9 adding another-empty-file
10 removing empty-file
10 removing empty-file
11 adding large-file
11 adding large-file
12 adding tiny-file
12 adding tiny-file
13 removing large-file
13 adding small-file
14 adding small-file
14 removing large-file
15 removing tiny-file
15 removing tiny-file
16 recording removal of tiny-file as rename to small-file (82% similar)
16 recording removal of tiny-file as rename to small-file (82% similar)
17 % should all fail
17 % should all fail
18 abort: similarity must be a number
18 abort: similarity must be a number
19 abort: similarity must be between 0 and 100
19 abort: similarity must be between 0 and 100
20 abort: similarity must be between 0 and 100
20 abort: similarity must be between 0 and 100
@@ -1,15 +1,15
1 adding dir/bar
1 adding dir/bar
2 adding foo
2 adding foo
3 dir/bar
3 dir/bar
4 foo
4 foo
5 adding dir/bar_2
5 adding dir/bar_2
6 adding foo_2
6 adding foo_2
7 dir/bar_2
7 dir/bar_2
8 foo_2
8 foo_2
9 adding a
9 adding a
10 adding c
10 adding c
11 removing a
11 adding b
12 adding b
13 removing c
12 adding d
14 adding d
13 removing a
14 removing c
15 recording removal of a as rename to b (100% similar)
15 recording removal of a as rename to b (100% similar)
General Comments 0
You need to be logged in to leave comments. Login now