##// END OF EJS Templates
Minor default branch cleanups
Matt Mackall -
r4178:0b48e398 default
parent child Browse files
Show More
@@ -1,509 +1,509 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import demandload
10 from demandload import demandload
11 demandload(globals(), "ancestor bdiff repo revlog util os")
11 demandload(globals(), "ancestor bdiff repo revlog util os")
12
12
13 class changectx(object):
13 class changectx(object):
14 """A changecontext object makes access to data related to a particular
14 """A changecontext object makes access to data related to a particular
15 changeset convenient."""
15 changeset convenient."""
16 def __init__(self, repo, changeid=None):
16 def __init__(self, repo, changeid=None):
17 """changeid is a revision number, node, or tag"""
17 """changeid is a revision number, node, or tag"""
18 self._repo = repo
18 self._repo = repo
19
19
20 if not changeid and changeid != 0:
20 if not changeid and changeid != 0:
21 p1, p2 = self._repo.dirstate.parents()
21 p1, p2 = self._repo.dirstate.parents()
22 self._rev = self._repo.changelog.rev(p1)
22 self._rev = self._repo.changelog.rev(p1)
23 if self._rev == -1:
23 if self._rev == -1:
24 changeid = 'tip'
24 changeid = 'tip'
25 else:
25 else:
26 self._node = p1
26 self._node = p1
27 return
27 return
28
28
29 self._node = self._repo.lookup(changeid)
29 self._node = self._repo.lookup(changeid)
30 self._rev = self._repo.changelog.rev(self._node)
30 self._rev = self._repo.changelog.rev(self._node)
31
31
32 def __str__(self):
32 def __str__(self):
33 return short(self.node())
33 return short(self.node())
34
34
35 def __repr__(self):
35 def __repr__(self):
36 return "<changectx %s>" % str(self)
36 return "<changectx %s>" % str(self)
37
37
38 def __eq__(self, other):
38 def __eq__(self, other):
39 try:
39 try:
40 return self._rev == other._rev
40 return self._rev == other._rev
41 except AttributeError:
41 except AttributeError:
42 return False
42 return False
43
43
44 def __nonzero__(self):
44 def __nonzero__(self):
45 return self._rev != nullrev
45 return self._rev != nullrev
46
46
47 def __getattr__(self, name):
47 def __getattr__(self, name):
48 if name == '_changeset':
48 if name == '_changeset':
49 self._changeset = self._repo.changelog.read(self.node())
49 self._changeset = self._repo.changelog.read(self.node())
50 return self._changeset
50 return self._changeset
51 elif name == '_manifest':
51 elif name == '_manifest':
52 self._manifest = self._repo.manifest.read(self._changeset[0])
52 self._manifest = self._repo.manifest.read(self._changeset[0])
53 return self._manifest
53 return self._manifest
54 elif name == '_manifestdelta':
54 elif name == '_manifestdelta':
55 md = self._repo.manifest.readdelta(self._changeset[0])
55 md = self._repo.manifest.readdelta(self._changeset[0])
56 self._manifestdelta = md
56 self._manifestdelta = md
57 return self._manifestdelta
57 return self._manifestdelta
58 else:
58 else:
59 raise AttributeError, name
59 raise AttributeError, name
60
60
61 def changeset(self): return self._changeset
61 def changeset(self): return self._changeset
62 def manifest(self): return self._manifest
62 def manifest(self): return self._manifest
63
63
64 def rev(self): return self._rev
64 def rev(self): return self._rev
65 def node(self): return self._node
65 def node(self): return self._node
66 def user(self): return self._changeset[1]
66 def user(self): return self._changeset[1]
67 def date(self): return self._changeset[2]
67 def date(self): return self._changeset[2]
68 def files(self): return self._changeset[3]
68 def files(self): return self._changeset[3]
69 def description(self): return self._changeset[4]
69 def description(self): return self._changeset[4]
70 def branch(self): return self._changeset[5].get("branch", "")
70 def branch(self): return self._changeset[5].get("branch")
71
71
72 def parents(self):
72 def parents(self):
73 """return contexts for each parent changeset"""
73 """return contexts for each parent changeset"""
74 p = self._repo.changelog.parents(self._node)
74 p = self._repo.changelog.parents(self._node)
75 return [changectx(self._repo, x) for x in p]
75 return [changectx(self._repo, x) for x in p]
76
76
77 def children(self):
77 def children(self):
78 """return contexts for each child changeset"""
78 """return contexts for each child changeset"""
79 c = self._repo.changelog.children(self._node)
79 c = self._repo.changelog.children(self._node)
80 return [changectx(self._repo, x) for x in c]
80 return [changectx(self._repo, x) for x in c]
81
81
82 def filenode(self, path):
82 def filenode(self, path):
83 if '_manifest' in self.__dict__:
83 if '_manifest' in self.__dict__:
84 try:
84 try:
85 return self._manifest[path]
85 return self._manifest[path]
86 except KeyError:
86 except KeyError:
87 raise repo.LookupError(_("'%s' not found in manifest") % path)
87 raise repo.LookupError(_("'%s' not found in manifest") % path)
88 if '_manifestdelta' in self.__dict__ or path in self.files():
88 if '_manifestdelta' in self.__dict__ or path in self.files():
89 if path in self._manifestdelta:
89 if path in self._manifestdelta:
90 return self._manifestdelta[path]
90 return self._manifestdelta[path]
91 node, flag = self._repo.manifest.find(self._changeset[0], path)
91 node, flag = self._repo.manifest.find(self._changeset[0], path)
92 if not node:
92 if not node:
93 raise repo.LookupError(_("'%s' not found in manifest") % path)
93 raise repo.LookupError(_("'%s' not found in manifest") % path)
94
94
95 return node
95 return node
96
96
97 def filectx(self, path, fileid=None):
97 def filectx(self, path, fileid=None):
98 """get a file context from this changeset"""
98 """get a file context from this changeset"""
99 if fileid is None:
99 if fileid is None:
100 fileid = self.filenode(path)
100 fileid = self.filenode(path)
101 return filectx(self._repo, path, fileid=fileid, changectx=self)
101 return filectx(self._repo, path, fileid=fileid, changectx=self)
102
102
103 def filectxs(self):
103 def filectxs(self):
104 """generate a file context for each file in this changeset's
104 """generate a file context for each file in this changeset's
105 manifest"""
105 manifest"""
106 mf = self.manifest()
106 mf = self.manifest()
107 m = mf.keys()
107 m = mf.keys()
108 m.sort()
108 m.sort()
109 for f in m:
109 for f in m:
110 yield self.filectx(f, fileid=mf[f])
110 yield self.filectx(f, fileid=mf[f])
111
111
112 def ancestor(self, c2):
112 def ancestor(self, c2):
113 """
113 """
114 return the ancestor context of self and c2
114 return the ancestor context of self and c2
115 """
115 """
116 n = self._repo.changelog.ancestor(self._node, c2._node)
116 n = self._repo.changelog.ancestor(self._node, c2._node)
117 return changectx(self._repo, n)
117 return changectx(self._repo, n)
118
118
119 class filectx(object):
119 class filectx(object):
120 """A filecontext object makes access to data related to a particular
120 """A filecontext object makes access to data related to a particular
121 filerevision convenient."""
121 filerevision convenient."""
122 def __init__(self, repo, path, changeid=None, fileid=None,
122 def __init__(self, repo, path, changeid=None, fileid=None,
123 filelog=None, changectx=None):
123 filelog=None, changectx=None):
124 """changeid can be a changeset revision, node, or tag.
124 """changeid can be a changeset revision, node, or tag.
125 fileid can be a file revision or node."""
125 fileid can be a file revision or node."""
126 self._repo = repo
126 self._repo = repo
127 self._path = path
127 self._path = path
128
128
129 assert changeid is not None or fileid is not None
129 assert changeid is not None or fileid is not None
130
130
131 if filelog:
131 if filelog:
132 self._filelog = filelog
132 self._filelog = filelog
133 if changectx:
133 if changectx:
134 self._changectx = changectx
134 self._changectx = changectx
135 self._changeid = changectx.node()
135 self._changeid = changectx.node()
136
136
137 if fileid is None:
137 if fileid is None:
138 self._changeid = changeid
138 self._changeid = changeid
139 else:
139 else:
140 self._fileid = fileid
140 self._fileid = fileid
141
141
142 def __getattr__(self, name):
142 def __getattr__(self, name):
143 if name == '_changectx':
143 if name == '_changectx':
144 self._changectx = changectx(self._repo, self._changeid)
144 self._changectx = changectx(self._repo, self._changeid)
145 return self._changectx
145 return self._changectx
146 elif name == '_filelog':
146 elif name == '_filelog':
147 self._filelog = self._repo.file(self._path)
147 self._filelog = self._repo.file(self._path)
148 return self._filelog
148 return self._filelog
149 elif name == '_changeid':
149 elif name == '_changeid':
150 self._changeid = self._filelog.linkrev(self._filenode)
150 self._changeid = self._filelog.linkrev(self._filenode)
151 return self._changeid
151 return self._changeid
152 elif name == '_filenode':
152 elif name == '_filenode':
153 try:
153 try:
154 if '_fileid' in self.__dict__:
154 if '_fileid' in self.__dict__:
155 self._filenode = self._filelog.lookup(self._fileid)
155 self._filenode = self._filelog.lookup(self._fileid)
156 else:
156 else:
157 self._filenode = self._changectx.filenode(self._path)
157 self._filenode = self._changectx.filenode(self._path)
158 except revlog.RevlogError, inst:
158 except revlog.RevlogError, inst:
159 raise repo.LookupError(str(inst))
159 raise repo.LookupError(str(inst))
160 return self._filenode
160 return self._filenode
161 elif name == '_filerev':
161 elif name == '_filerev':
162 self._filerev = self._filelog.rev(self._filenode)
162 self._filerev = self._filelog.rev(self._filenode)
163 return self._filerev
163 return self._filerev
164 else:
164 else:
165 raise AttributeError, name
165 raise AttributeError, name
166
166
167 def __nonzero__(self):
167 def __nonzero__(self):
168 try:
168 try:
169 n = self._filenode
169 n = self._filenode
170 return True
170 return True
171 except repo.LookupError:
171 except repo.LookupError:
172 # file is missing
172 # file is missing
173 return False
173 return False
174
174
175 def __str__(self):
175 def __str__(self):
176 return "%s@%s" % (self.path(), short(self.node()))
176 return "%s@%s" % (self.path(), short(self.node()))
177
177
178 def __repr__(self):
178 def __repr__(self):
179 return "<filectx %s>" % str(self)
179 return "<filectx %s>" % str(self)
180
180
181 def __eq__(self, other):
181 def __eq__(self, other):
182 try:
182 try:
183 return (self._path == other._path
183 return (self._path == other._path
184 and self._changeid == other._changeid)
184 and self._changeid == other._changeid)
185 except AttributeError:
185 except AttributeError:
186 return False
186 return False
187
187
188 def filectx(self, fileid):
188 def filectx(self, fileid):
189 '''opens an arbitrary revision of the file without
189 '''opens an arbitrary revision of the file without
190 opening a new filelog'''
190 opening a new filelog'''
191 return filectx(self._repo, self._path, fileid=fileid,
191 return filectx(self._repo, self._path, fileid=fileid,
192 filelog=self._filelog)
192 filelog=self._filelog)
193
193
194 def filerev(self): return self._filerev
194 def filerev(self): return self._filerev
195 def filenode(self): return self._filenode
195 def filenode(self): return self._filenode
196 def filelog(self): return self._filelog
196 def filelog(self): return self._filelog
197
197
198 def rev(self):
198 def rev(self):
199 if '_changectx' in self.__dict__:
199 if '_changectx' in self.__dict__:
200 return self._changectx.rev()
200 return self._changectx.rev()
201 return self._filelog.linkrev(self._filenode)
201 return self._filelog.linkrev(self._filenode)
202
202
203 def node(self): return self._changectx.node()
203 def node(self): return self._changectx.node()
204 def user(self): return self._changectx.user()
204 def user(self): return self._changectx.user()
205 def date(self): return self._changectx.date()
205 def date(self): return self._changectx.date()
206 def files(self): return self._changectx.files()
206 def files(self): return self._changectx.files()
207 def description(self): return self._changectx.description()
207 def description(self): return self._changectx.description()
208 def branch(self): return self._changectx.branch()
208 def branch(self): return self._changectx.branch()
209 def manifest(self): return self._changectx.manifest()
209 def manifest(self): return self._changectx.manifest()
210 def changectx(self): return self._changectx
210 def changectx(self): return self._changectx
211
211
212 def data(self): return self._filelog.read(self._filenode)
212 def data(self): return self._filelog.read(self._filenode)
213 def renamed(self): return self._filelog.renamed(self._filenode)
213 def renamed(self): return self._filelog.renamed(self._filenode)
214 def path(self): return self._path
214 def path(self): return self._path
215 def size(self): return self._filelog.size(self._filerev)
215 def size(self): return self._filelog.size(self._filerev)
216
216
217 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
217 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
218
218
219 def parents(self):
219 def parents(self):
220 p = self._path
220 p = self._path
221 fl = self._filelog
221 fl = self._filelog
222 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
222 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
223
223
224 r = self.renamed()
224 r = self.renamed()
225 if r:
225 if r:
226 pl[0] = (r[0], r[1], None)
226 pl[0] = (r[0], r[1], None)
227
227
228 return [filectx(self._repo, p, fileid=n, filelog=l)
228 return [filectx(self._repo, p, fileid=n, filelog=l)
229 for p,n,l in pl if n != nullid]
229 for p,n,l in pl if n != nullid]
230
230
231 def children(self):
231 def children(self):
232 # hard for renames
232 # hard for renames
233 c = self._filelog.children(self._filenode)
233 c = self._filelog.children(self._filenode)
234 return [filectx(self._repo, self._path, fileid=x,
234 return [filectx(self._repo, self._path, fileid=x,
235 filelog=self._filelog) for x in c]
235 filelog=self._filelog) for x in c]
236
236
237 def annotate(self, follow=False):
237 def annotate(self, follow=False):
238 '''returns a list of tuples of (ctx, line) for each line
238 '''returns a list of tuples of (ctx, line) for each line
239 in the file, where ctx is the filectx of the node where
239 in the file, where ctx is the filectx of the node where
240 that line was last changed'''
240 that line was last changed'''
241
241
242 def decorate(text, rev):
242 def decorate(text, rev):
243 return ([rev] * len(text.splitlines()), text)
243 return ([rev] * len(text.splitlines()), text)
244
244
245 def pair(parent, child):
245 def pair(parent, child):
246 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
246 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
247 child[0][b1:b2] = parent[0][a1:a2]
247 child[0][b1:b2] = parent[0][a1:a2]
248 return child
248 return child
249
249
250 getlog = util.cachefunc(lambda x: self._repo.file(x))
250 getlog = util.cachefunc(lambda x: self._repo.file(x))
251 def getctx(path, fileid):
251 def getctx(path, fileid):
252 log = path == self._path and self._filelog or getlog(path)
252 log = path == self._path and self._filelog or getlog(path)
253 return filectx(self._repo, path, fileid=fileid, filelog=log)
253 return filectx(self._repo, path, fileid=fileid, filelog=log)
254 getctx = util.cachefunc(getctx)
254 getctx = util.cachefunc(getctx)
255
255
256 def parents(f):
256 def parents(f):
257 # we want to reuse filectx objects as much as possible
257 # we want to reuse filectx objects as much as possible
258 p = f._path
258 p = f._path
259 if f._filerev is None: # working dir
259 if f._filerev is None: # working dir
260 pl = [(n.path(), n.filerev()) for n in f.parents()]
260 pl = [(n.path(), n.filerev()) for n in f.parents()]
261 else:
261 else:
262 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
262 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
263
263
264 if follow:
264 if follow:
265 r = f.renamed()
265 r = f.renamed()
266 if r:
266 if r:
267 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
267 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
268
268
269 return [getctx(p, n) for p, n in pl if n != nullrev]
269 return [getctx(p, n) for p, n in pl if n != nullrev]
270
270
271 # use linkrev to find the first changeset where self appeared
271 # use linkrev to find the first changeset where self appeared
272 if self.rev() != self._filelog.linkrev(self._filenode):
272 if self.rev() != self._filelog.linkrev(self._filenode):
273 base = self.filectx(self.filerev())
273 base = self.filectx(self.filerev())
274 else:
274 else:
275 base = self
275 base = self
276
276
277 # find all ancestors
277 # find all ancestors
278 needed = {base: 1}
278 needed = {base: 1}
279 visit = [base]
279 visit = [base]
280 files = [base._path]
280 files = [base._path]
281 while visit:
281 while visit:
282 f = visit.pop(0)
282 f = visit.pop(0)
283 for p in parents(f):
283 for p in parents(f):
284 if p not in needed:
284 if p not in needed:
285 needed[p] = 1
285 needed[p] = 1
286 visit.append(p)
286 visit.append(p)
287 if p._path not in files:
287 if p._path not in files:
288 files.append(p._path)
288 files.append(p._path)
289 else:
289 else:
290 # count how many times we'll use this
290 # count how many times we'll use this
291 needed[p] += 1
291 needed[p] += 1
292
292
293 # sort by revision (per file) which is a topological order
293 # sort by revision (per file) which is a topological order
294 visit = []
294 visit = []
295 files.reverse()
295 files.reverse()
296 for f in files:
296 for f in files:
297 fn = [(n._filerev, n) for n in needed.keys() if n._path == f]
297 fn = [(n._filerev, n) for n in needed.keys() if n._path == f]
298 fn.sort()
298 fn.sort()
299 visit.extend(fn)
299 visit.extend(fn)
300 hist = {}
300 hist = {}
301
301
302 for r, f in visit:
302 for r, f in visit:
303 curr = decorate(f.data(), f)
303 curr = decorate(f.data(), f)
304 for p in parents(f):
304 for p in parents(f):
305 if p != nullid:
305 if p != nullid:
306 curr = pair(hist[p], curr)
306 curr = pair(hist[p], curr)
307 # trim the history of unneeded revs
307 # trim the history of unneeded revs
308 needed[p] -= 1
308 needed[p] -= 1
309 if not needed[p]:
309 if not needed[p]:
310 del hist[p]
310 del hist[p]
311 hist[f] = curr
311 hist[f] = curr
312
312
313 return zip(hist[f][0], hist[f][1].splitlines(1))
313 return zip(hist[f][0], hist[f][1].splitlines(1))
314
314
315 def ancestor(self, fc2):
315 def ancestor(self, fc2):
316 """
316 """
317 find the common ancestor file context, if any, of self, and fc2
317 find the common ancestor file context, if any, of self, and fc2
318 """
318 """
319
319
320 acache = {}
320 acache = {}
321
321
322 # prime the ancestor cache for the working directory
322 # prime the ancestor cache for the working directory
323 for c in (self, fc2):
323 for c in (self, fc2):
324 if c._filerev == None:
324 if c._filerev == None:
325 pl = [(n.path(), n.filenode()) for n in c.parents()]
325 pl = [(n.path(), n.filenode()) for n in c.parents()]
326 acache[(c._path, None)] = pl
326 acache[(c._path, None)] = pl
327
327
328 flcache = {self._path:self._filelog, fc2._path:fc2._filelog}
328 flcache = {self._path:self._filelog, fc2._path:fc2._filelog}
329 def parents(vertex):
329 def parents(vertex):
330 if vertex in acache:
330 if vertex in acache:
331 return acache[vertex]
331 return acache[vertex]
332 f, n = vertex
332 f, n = vertex
333 if f not in flcache:
333 if f not in flcache:
334 flcache[f] = self._repo.file(f)
334 flcache[f] = self._repo.file(f)
335 fl = flcache[f]
335 fl = flcache[f]
336 pl = [(f, p) for p in fl.parents(n) if p != nullid]
336 pl = [(f, p) for p in fl.parents(n) if p != nullid]
337 re = fl.renamed(n)
337 re = fl.renamed(n)
338 if re:
338 if re:
339 pl.append(re)
339 pl.append(re)
340 acache[vertex] = pl
340 acache[vertex] = pl
341 return pl
341 return pl
342
342
343 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
343 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
344 v = ancestor.ancestor(a, b, parents)
344 v = ancestor.ancestor(a, b, parents)
345 if v:
345 if v:
346 f, n = v
346 f, n = v
347 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
347 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
348
348
349 return None
349 return None
350
350
351 class workingctx(changectx):
351 class workingctx(changectx):
352 """A workingctx object makes access to data related to
352 """A workingctx object makes access to data related to
353 the current working directory convenient."""
353 the current working directory convenient."""
354 def __init__(self, repo):
354 def __init__(self, repo):
355 self._repo = repo
355 self._repo = repo
356 self._rev = None
356 self._rev = None
357 self._node = None
357 self._node = None
358
358
359 def __str__(self):
359 def __str__(self):
360 return str(self._parents[0]) + "+"
360 return str(self._parents[0]) + "+"
361
361
362 def __nonzero__(self):
362 def __nonzero__(self):
363 return True
363 return True
364
364
365 def __getattr__(self, name):
365 def __getattr__(self, name):
366 if name == '_parents':
366 if name == '_parents':
367 self._parents = self._repo.parents()
367 self._parents = self._repo.parents()
368 return self._parents
368 return self._parents
369 if name == '_status':
369 if name == '_status':
370 self._status = self._repo.status()
370 self._status = self._repo.status()
371 return self._status
371 return self._status
372 if name == '_manifest':
372 if name == '_manifest':
373 self._buildmanifest()
373 self._buildmanifest()
374 return self._manifest
374 return self._manifest
375 else:
375 else:
376 raise AttributeError, name
376 raise AttributeError, name
377
377
378 def _buildmanifest(self):
378 def _buildmanifest(self):
379 """generate a manifest corresponding to the working directory"""
379 """generate a manifest corresponding to the working directory"""
380
380
381 man = self._parents[0].manifest().copy()
381 man = self._parents[0].manifest().copy()
382 copied = self._repo.dirstate.copies()
382 copied = self._repo.dirstate.copies()
383 modified, added, removed, deleted, unknown = self._status[:5]
383 modified, added, removed, deleted, unknown = self._status[:5]
384 for i, l in (("a", added), ("m", modified), ("u", unknown)):
384 for i, l in (("a", added), ("m", modified), ("u", unknown)):
385 for f in l:
385 for f in l:
386 man[f] = man.get(copied.get(f, f), nullid) + i
386 man[f] = man.get(copied.get(f, f), nullid) + i
387 try:
387 try:
388 man.set(f, util.is_exec(self._repo.wjoin(f), man.execf(f)))
388 man.set(f, util.is_exec(self._repo.wjoin(f), man.execf(f)))
389 except OSError:
389 except OSError:
390 pass
390 pass
391
391
392 for f in deleted + removed:
392 for f in deleted + removed:
393 if f in man:
393 if f in man:
394 del man[f]
394 del man[f]
395
395
396 self._manifest = man
396 self._manifest = man
397
397
398 def manifest(self): return self._manifest
398 def manifest(self): return self._manifest
399
399
400 def user(self): return self._repo.ui.username()
400 def user(self): return self._repo.ui.username()
401 def date(self): return util.makedate()
401 def date(self): return util.makedate()
402 def description(self): return ""
402 def description(self): return ""
403 def files(self):
403 def files(self):
404 f = self.modified() + self.added() + self.removed()
404 f = self.modified() + self.added() + self.removed()
405 f.sort()
405 f.sort()
406 return f
406 return f
407
407
408 def modified(self): return self._status[0]
408 def modified(self): return self._status[0]
409 def added(self): return self._status[1]
409 def added(self): return self._status[1]
410 def removed(self): return self._status[2]
410 def removed(self): return self._status[2]
411 def deleted(self): return self._status[3]
411 def deleted(self): return self._status[3]
412 def unknown(self): return self._status[4]
412 def unknown(self): return self._status[4]
413 def clean(self): return self._status[5]
413 def clean(self): return self._status[5]
414 def branch(self):
414 def branch(self):
415 try:
415 try:
416 return self._repo.opener("branch").read().strip() or "default"
416 return self._repo.opener("branch").read().strip() or "default"
417 except IOError:
417 except IOError:
418 return "default"
418 return "default"
419
419
420 def parents(self):
420 def parents(self):
421 """return contexts for each parent changeset"""
421 """return contexts for each parent changeset"""
422 return self._parents
422 return self._parents
423
423
424 def children(self):
424 def children(self):
425 return []
425 return []
426
426
427 def filectx(self, path):
427 def filectx(self, path):
428 """get a file context from the working directory"""
428 """get a file context from the working directory"""
429 return workingfilectx(self._repo, path, workingctx=self)
429 return workingfilectx(self._repo, path, workingctx=self)
430
430
431 def ancestor(self, c2):
431 def ancestor(self, c2):
432 """return the ancestor context of self and c2"""
432 """return the ancestor context of self and c2"""
433 return self._parents[0].ancestor(c2) # punt on two parents for now
433 return self._parents[0].ancestor(c2) # punt on two parents for now
434
434
435 class workingfilectx(filectx):
435 class workingfilectx(filectx):
436 """A workingfilectx object makes access to data related to a particular
436 """A workingfilectx object makes access to data related to a particular
437 file in the working directory convenient."""
437 file in the working directory convenient."""
438 def __init__(self, repo, path, filelog=None, workingctx=None):
438 def __init__(self, repo, path, filelog=None, workingctx=None):
439 """changeid can be a changeset revision, node, or tag.
439 """changeid can be a changeset revision, node, or tag.
440 fileid can be a file revision or node."""
440 fileid can be a file revision or node."""
441 self._repo = repo
441 self._repo = repo
442 self._path = path
442 self._path = path
443 self._changeid = None
443 self._changeid = None
444 self._filerev = self._filenode = None
444 self._filerev = self._filenode = None
445
445
446 if filelog:
446 if filelog:
447 self._filelog = filelog
447 self._filelog = filelog
448 if workingctx:
448 if workingctx:
449 self._changectx = workingctx
449 self._changectx = workingctx
450
450
451 def __getattr__(self, name):
451 def __getattr__(self, name):
452 if name == '_changectx':
452 if name == '_changectx':
453 self._changectx = workingctx(repo)
453 self._changectx = workingctx(repo)
454 return self._changectx
454 return self._changectx
455 elif name == '_repopath':
455 elif name == '_repopath':
456 self._repopath = (self._repo.dirstate.copied(self._path)
456 self._repopath = (self._repo.dirstate.copied(self._path)
457 or self._path)
457 or self._path)
458 return self._repopath
458 return self._repopath
459 elif name == '_filelog':
459 elif name == '_filelog':
460 self._filelog = self._repo.file(self._repopath)
460 self._filelog = self._repo.file(self._repopath)
461 return self._filelog
461 return self._filelog
462 else:
462 else:
463 raise AttributeError, name
463 raise AttributeError, name
464
464
465 def __nonzero__(self):
465 def __nonzero__(self):
466 return True
466 return True
467
467
468 def __str__(self):
468 def __str__(self):
469 return "%s@%s" % (self.path(), self._changectx)
469 return "%s@%s" % (self.path(), self._changectx)
470
470
471 def filectx(self, fileid):
471 def filectx(self, fileid):
472 '''opens an arbitrary revision of the file without
472 '''opens an arbitrary revision of the file without
473 opening a new filelog'''
473 opening a new filelog'''
474 return filectx(self._repo, self._repopath, fileid=fileid,
474 return filectx(self._repo, self._repopath, fileid=fileid,
475 filelog=self._filelog)
475 filelog=self._filelog)
476
476
477 def rev(self):
477 def rev(self):
478 if '_changectx' in self.__dict__:
478 if '_changectx' in self.__dict__:
479 return self._changectx.rev()
479 return self._changectx.rev()
480 return self._filelog.linkrev(self._filenode)
480 return self._filelog.linkrev(self._filenode)
481
481
482 def data(self): return self._repo.wread(self._path)
482 def data(self): return self._repo.wread(self._path)
483 def renamed(self):
483 def renamed(self):
484 rp = self._repopath
484 rp = self._repopath
485 if rp == self._path:
485 if rp == self._path:
486 return None
486 return None
487 return rp, self._workingctx._parents._manifest.get(rp, nullid)
487 return rp, self._workingctx._parents._manifest.get(rp, nullid)
488
488
489 def parents(self):
489 def parents(self):
490 '''return parent filectxs, following copies if necessary'''
490 '''return parent filectxs, following copies if necessary'''
491 p = self._path
491 p = self._path
492 rp = self._repopath
492 rp = self._repopath
493 pcl = self._changectx._parents
493 pcl = self._changectx._parents
494 fl = self._filelog
494 fl = self._filelog
495 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
495 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
496 if len(pcl) > 1:
496 if len(pcl) > 1:
497 if rp != p:
497 if rp != p:
498 fl = None
498 fl = None
499 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
499 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
500
500
501 return [filectx(self._repo, p, fileid=n, filelog=l)
501 return [filectx(self._repo, p, fileid=n, filelog=l)
502 for p,n,l in pl if n != nullid]
502 for p,n,l in pl if n != nullid]
503
503
504 def children(self):
504 def children(self):
505 return []
505 return []
506
506
507 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
507 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
508
508
509 def cmp(self, text): return self._repo.wread(self._path) == text
509 def cmp(self, text): return self._repo.wread(self._path) == text
@@ -1,1990 +1,1990 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19 supported = ('revlogv1', 'store')
19 supported = ('revlogv1', 'store')
20
20
21 def __del__(self):
21 def __del__(self):
22 self.transhandle = None
22 self.transhandle = None
23 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
24 repo.repository.__init__(self)
24 repo.repository.__init__(self)
25 if not path:
25 if not path:
26 p = os.getcwd()
26 p = os.getcwd()
27 while not os.path.isdir(os.path.join(p, ".hg")):
27 while not os.path.isdir(os.path.join(p, ".hg")):
28 oldp = p
28 oldp = p
29 p = os.path.dirname(p)
29 p = os.path.dirname(p)
30 if p == oldp:
30 if p == oldp:
31 raise repo.RepoError(_("There is no Mercurial repository"
31 raise repo.RepoError(_("There is no Mercurial repository"
32 " here (.hg not found)"))
32 " here (.hg not found)"))
33 path = p
33 path = p
34
34
35 self.root = os.path.realpath(path)
35 self.root = os.path.realpath(path)
36 self.path = os.path.join(self.root, ".hg")
36 self.path = os.path.join(self.root, ".hg")
37 self.origroot = path
37 self.origroot = path
38 self.opener = util.opener(self.path)
38 self.opener = util.opener(self.path)
39 self.wopener = util.opener(self.root)
39 self.wopener = util.opener(self.root)
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements = ("revlogv1", "store")
47 requirements = ("revlogv1", "store")
48 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
49 for r in requirements:
50 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
51 reqfile.close()
51 reqfile.close()
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 v = self.ui.configrevlog()
91 v = self.ui.configrevlog()
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 fl = v.get('flags', None)
94 fl = v.get('flags', None)
95 flags = 0
95 flags = 0
96 if fl != None:
96 if fl != None:
97 for x in fl.split():
97 for x in fl.split():
98 flags |= revlog.flagstr(x)
98 flags |= revlog.flagstr(x)
99 elif self.revlogv1:
99 elif self.revlogv1:
100 flags = revlog.REVLOG_DEFAULT_FLAGS
100 flags = revlog.REVLOG_DEFAULT_FLAGS
101
101
102 v = self.revlogversion | flags
102 v = self.revlogversion | flags
103 self.manifest = manifest.manifest(self.sopener, v)
103 self.manifest = manifest.manifest(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
105
105
106 fallback = self.ui.config('ui', 'fallbackencoding')
106 fallback = self.ui.config('ui', 'fallbackencoding')
107 if fallback:
107 if fallback:
108 util._fallbackencoding = fallback
108 util._fallbackencoding = fallback
109
109
110 # the changelog might not have the inline index flag
110 # the changelog might not have the inline index flag
111 # on. If the format of the changelog is the same as found in
111 # on. If the format of the changelog is the same as found in
112 # .hgrc, apply any flags found in the .hgrc as well.
112 # .hgrc, apply any flags found in the .hgrc as well.
113 # Otherwise, just version from the changelog
113 # Otherwise, just version from the changelog
114 v = self.changelog.version
114 v = self.changelog.version
115 if v == self.revlogversion:
115 if v == self.revlogversion:
116 v |= flags
116 v |= flags
117 self.revlogversion = v
117 self.revlogversion = v
118
118
119 self.tagscache = None
119 self.tagscache = None
120 self.branchcache = None
120 self.branchcache = None
121 self.nodetagscache = None
121 self.nodetagscache = None
122 self.encodepats = None
122 self.encodepats = None
123 self.decodepats = None
123 self.decodepats = None
124 self.transhandle = None
124 self.transhandle = None
125
125
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127
127
128 def url(self):
128 def url(self):
129 return 'file:' + self.root
129 return 'file:' + self.root
130
130
131 def hook(self, name, throw=False, **args):
131 def hook(self, name, throw=False, **args):
132 def callhook(hname, funcname):
132 def callhook(hname, funcname):
133 '''call python hook. hook is callable object, looked up as
133 '''call python hook. hook is callable object, looked up as
134 name in python module. if callable returns "true", hook
134 name in python module. if callable returns "true", hook
135 fails, else passes. if hook raises exception, treated as
135 fails, else passes. if hook raises exception, treated as
136 hook failure. exception propagates if throw is "true".
136 hook failure. exception propagates if throw is "true".
137
137
138 reason for "true" meaning "hook failed" is so that
138 reason for "true" meaning "hook failed" is so that
139 unmodified commands (e.g. mercurial.commands.update) can
139 unmodified commands (e.g. mercurial.commands.update) can
140 be run as hooks without wrappers to convert return values.'''
140 be run as hooks without wrappers to convert return values.'''
141
141
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 d = funcname.rfind('.')
143 d = funcname.rfind('.')
144 if d == -1:
144 if d == -1:
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 % (hname, funcname))
146 % (hname, funcname))
147 modname = funcname[:d]
147 modname = funcname[:d]
148 try:
148 try:
149 obj = __import__(modname)
149 obj = __import__(modname)
150 except ImportError:
150 except ImportError:
151 try:
151 try:
152 # extensions are loaded with hgext_ prefix
152 # extensions are loaded with hgext_ prefix
153 obj = __import__("hgext_%s" % modname)
153 obj = __import__("hgext_%s" % modname)
154 except ImportError:
154 except ImportError:
155 raise util.Abort(_('%s hook is invalid '
155 raise util.Abort(_('%s hook is invalid '
156 '(import of "%s" failed)') %
156 '(import of "%s" failed)') %
157 (hname, modname))
157 (hname, modname))
158 try:
158 try:
159 for p in funcname.split('.')[1:]:
159 for p in funcname.split('.')[1:]:
160 obj = getattr(obj, p)
160 obj = getattr(obj, p)
161 except AttributeError, err:
161 except AttributeError, err:
162 raise util.Abort(_('%s hook is invalid '
162 raise util.Abort(_('%s hook is invalid '
163 '("%s" is not defined)') %
163 '("%s" is not defined)') %
164 (hname, funcname))
164 (hname, funcname))
165 if not callable(obj):
165 if not callable(obj):
166 raise util.Abort(_('%s hook is invalid '
166 raise util.Abort(_('%s hook is invalid '
167 '("%s" is not callable)') %
167 '("%s" is not callable)') %
168 (hname, funcname))
168 (hname, funcname))
169 try:
169 try:
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 except (KeyboardInterrupt, util.SignalInterrupt):
171 except (KeyboardInterrupt, util.SignalInterrupt):
172 raise
172 raise
173 except Exception, exc:
173 except Exception, exc:
174 if isinstance(exc, util.Abort):
174 if isinstance(exc, util.Abort):
175 self.ui.warn(_('error: %s hook failed: %s\n') %
175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 (hname, exc.args[0]))
176 (hname, exc.args[0]))
177 else:
177 else:
178 self.ui.warn(_('error: %s hook raised an exception: '
178 self.ui.warn(_('error: %s hook raised an exception: '
179 '%s\n') % (hname, exc))
179 '%s\n') % (hname, exc))
180 if throw:
180 if throw:
181 raise
181 raise
182 self.ui.print_exc()
182 self.ui.print_exc()
183 return True
183 return True
184 if r:
184 if r:
185 if throw:
185 if throw:
186 raise util.Abort(_('%s hook failed') % hname)
186 raise util.Abort(_('%s hook failed') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 return r
188 return r
189
189
190 def runhook(name, cmd):
190 def runhook(name, cmd):
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 r = util.system(cmd, environ=env, cwd=self.root)
193 r = util.system(cmd, environ=env, cwd=self.root)
194 if r:
194 if r:
195 desc, r = util.explain_exit(r)
195 desc, r = util.explain_exit(r)
196 if throw:
196 if throw:
197 raise util.Abort(_('%s hook %s') % (name, desc))
197 raise util.Abort(_('%s hook %s') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 return r
199 return r
200
200
201 r = False
201 r = False
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 if hname.split(".", 1)[0] == name and cmd]
203 if hname.split(".", 1)[0] == name and cmd]
204 hooks.sort()
204 hooks.sort()
205 for hname, cmd in hooks:
205 for hname, cmd in hooks:
206 if cmd.startswith('python:'):
206 if cmd.startswith('python:'):
207 r = callhook(hname, cmd[7:].strip()) or r
207 r = callhook(hname, cmd[7:].strip()) or r
208 else:
208 else:
209 r = runhook(hname, cmd) or r
209 r = runhook(hname, cmd) or r
210 return r
210 return r
211
211
212 tag_disallowed = ':\r\n'
212 tag_disallowed = ':\r\n'
213
213
214 def tag(self, name, node, message, local, user, date):
214 def tag(self, name, node, message, local, user, date):
215 '''tag a revision with a symbolic name.
215 '''tag a revision with a symbolic name.
216
216
217 if local is True, the tag is stored in a per-repository file.
217 if local is True, the tag is stored in a per-repository file.
218 otherwise, it is stored in the .hgtags file, and a new
218 otherwise, it is stored in the .hgtags file, and a new
219 changeset is committed with the change.
219 changeset is committed with the change.
220
220
221 keyword arguments:
221 keyword arguments:
222
222
223 local: whether to store tag in non-version-controlled file
223 local: whether to store tag in non-version-controlled file
224 (default False)
224 (default False)
225
225
226 message: commit message to use if committing
226 message: commit message to use if committing
227
227
228 user: name of user to use if committing
228 user: name of user to use if committing
229
229
230 date: date tuple to use if committing'''
230 date: date tuple to use if committing'''
231
231
232 for c in self.tag_disallowed:
232 for c in self.tag_disallowed:
233 if c in name:
233 if c in name:
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235
235
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237
237
238 if local:
238 if local:
239 # local tags are stored in the current charset
239 # local tags are stored in the current charset
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 self.hook('tag', node=hex(node), tag=name, local=local)
241 self.hook('tag', node=hex(node), tag=name, local=local)
242 return
242 return
243
243
244 for x in self.status()[:5]:
244 for x in self.status()[:5]:
245 if '.hgtags' in x:
245 if '.hgtags' in x:
246 raise util.Abort(_('working copy of .hgtags is changed '
246 raise util.Abort(_('working copy of .hgtags is changed '
247 '(please commit .hgtags manually)'))
247 '(please commit .hgtags manually)'))
248
248
249 # committed tags are stored in UTF-8
249 # committed tags are stored in UTF-8
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 self.wfile('.hgtags', 'ab').write(line)
251 self.wfile('.hgtags', 'ab').write(line)
252 if self.dirstate.state('.hgtags') == '?':
252 if self.dirstate.state('.hgtags') == '?':
253 self.add(['.hgtags'])
253 self.add(['.hgtags'])
254
254
255 self.commit(['.hgtags'], message, user, date)
255 self.commit(['.hgtags'], message, user, date)
256 self.hook('tag', node=hex(node), tag=name, local=local)
256 self.hook('tag', node=hex(node), tag=name, local=local)
257
257
258 def tags(self):
258 def tags(self):
259 '''return a mapping of tag to node'''
259 '''return a mapping of tag to node'''
260 if not self.tagscache:
260 if not self.tagscache:
261 self.tagscache = {}
261 self.tagscache = {}
262
262
263 def parsetag(line, context):
263 def parsetag(line, context):
264 if not line:
264 if not line:
265 return
265 return
266 s = l.split(" ", 1)
266 s = l.split(" ", 1)
267 if len(s) != 2:
267 if len(s) != 2:
268 self.ui.warn(_("%s: cannot parse entry\n") % context)
268 self.ui.warn(_("%s: cannot parse entry\n") % context)
269 return
269 return
270 node, key = s
270 node, key = s
271 key = util.tolocal(key.strip()) # stored in UTF-8
271 key = util.tolocal(key.strip()) # stored in UTF-8
272 try:
272 try:
273 bin_n = bin(node)
273 bin_n = bin(node)
274 except TypeError:
274 except TypeError:
275 self.ui.warn(_("%s: node '%s' is not well formed\n") %
275 self.ui.warn(_("%s: node '%s' is not well formed\n") %
276 (context, node))
276 (context, node))
277 return
277 return
278 if bin_n not in self.changelog.nodemap:
278 if bin_n not in self.changelog.nodemap:
279 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
279 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
280 (context, key))
280 (context, key))
281 return
281 return
282 self.tagscache[key] = bin_n
282 self.tagscache[key] = bin_n
283
283
284 # read the tags file from each head, ending with the tip,
284 # read the tags file from each head, ending with the tip,
285 # and add each tag found to the map, with "newer" ones
285 # and add each tag found to the map, with "newer" ones
286 # taking precedence
286 # taking precedence
287 f = None
287 f = None
288 for rev, node, fnode in self._hgtagsnodes():
288 for rev, node, fnode in self._hgtagsnodes():
289 f = (f and f.filectx(fnode) or
289 f = (f and f.filectx(fnode) or
290 self.filectx('.hgtags', fileid=fnode))
290 self.filectx('.hgtags', fileid=fnode))
291 count = 0
291 count = 0
292 for l in f.data().splitlines():
292 for l in f.data().splitlines():
293 count += 1
293 count += 1
294 parsetag(l, _("%s, line %d") % (str(f), count))
294 parsetag(l, _("%s, line %d") % (str(f), count))
295
295
296 try:
296 try:
297 f = self.opener("localtags")
297 f = self.opener("localtags")
298 count = 0
298 count = 0
299 for l in f:
299 for l in f:
300 # localtags are stored in the local character set
300 # localtags are stored in the local character set
301 # while the internal tag table is stored in UTF-8
301 # while the internal tag table is stored in UTF-8
302 l = util.fromlocal(l)
302 l = util.fromlocal(l)
303 count += 1
303 count += 1
304 parsetag(l, _("localtags, line %d") % count)
304 parsetag(l, _("localtags, line %d") % count)
305 except IOError:
305 except IOError:
306 pass
306 pass
307
307
308 self.tagscache['tip'] = self.changelog.tip()
308 self.tagscache['tip'] = self.changelog.tip()
309
309
310 return self.tagscache
310 return self.tagscache
311
311
312 def _hgtagsnodes(self):
312 def _hgtagsnodes(self):
313 heads = self.heads()
313 heads = self.heads()
314 heads.reverse()
314 heads.reverse()
315 last = {}
315 last = {}
316 ret = []
316 ret = []
317 for node in heads:
317 for node in heads:
318 c = self.changectx(node)
318 c = self.changectx(node)
319 rev = c.rev()
319 rev = c.rev()
320 try:
320 try:
321 fnode = c.filenode('.hgtags')
321 fnode = c.filenode('.hgtags')
322 except repo.LookupError:
322 except repo.LookupError:
323 continue
323 continue
324 ret.append((rev, node, fnode))
324 ret.append((rev, node, fnode))
325 if fnode in last:
325 if fnode in last:
326 ret[last[fnode]] = None
326 ret[last[fnode]] = None
327 last[fnode] = len(ret) - 1
327 last[fnode] = len(ret) - 1
328 return [item for item in ret if item]
328 return [item for item in ret if item]
329
329
330 def tagslist(self):
330 def tagslist(self):
331 '''return a list of tags ordered by revision'''
331 '''return a list of tags ordered by revision'''
332 l = []
332 l = []
333 for t, n in self.tags().items():
333 for t, n in self.tags().items():
334 try:
334 try:
335 r = self.changelog.rev(n)
335 r = self.changelog.rev(n)
336 except:
336 except:
337 r = -2 # sort to the beginning of the list if unknown
337 r = -2 # sort to the beginning of the list if unknown
338 l.append((r, t, n))
338 l.append((r, t, n))
339 l.sort()
339 l.sort()
340 return [(t, n) for r, t, n in l]
340 return [(t, n) for r, t, n in l]
341
341
342 def nodetags(self, node):
342 def nodetags(self, node):
343 '''return the tags associated with a node'''
343 '''return the tags associated with a node'''
344 if not self.nodetagscache:
344 if not self.nodetagscache:
345 self.nodetagscache = {}
345 self.nodetagscache = {}
346 for t, n in self.tags().items():
346 for t, n in self.tags().items():
347 self.nodetagscache.setdefault(n, []).append(t)
347 self.nodetagscache.setdefault(n, []).append(t)
348 return self.nodetagscache.get(node, [])
348 return self.nodetagscache.get(node, [])
349
349
350 def _branchtags(self):
350 def _branchtags(self):
351 partial, last, lrev = self._readbranchcache()
351 partial, last, lrev = self._readbranchcache()
352
352
353 tiprev = self.changelog.count() - 1
353 tiprev = self.changelog.count() - 1
354 if lrev != tiprev:
354 if lrev != tiprev:
355 self._updatebranchcache(partial, lrev+1, tiprev+1)
355 self._updatebranchcache(partial, lrev+1, tiprev+1)
356 self._writebranchcache(partial, self.changelog.tip(), tiprev)
356 self._writebranchcache(partial, self.changelog.tip(), tiprev)
357
357
358 return partial
358 return partial
359
359
360 def branchtags(self):
360 def branchtags(self):
361 if self.branchcache is not None:
361 if self.branchcache is not None:
362 return self.branchcache
362 return self.branchcache
363
363
364 self.branchcache = {} # avoid recursion in changectx
364 self.branchcache = {} # avoid recursion in changectx
365 partial = self._branchtags()
365 partial = self._branchtags()
366
366
367 # the branch cache is stored on disk as UTF-8, but in the local
367 # the branch cache is stored on disk as UTF-8, but in the local
368 # charset internally
368 # charset internally
369 for k, v in partial.items():
369 for k, v in partial.items():
370 self.branchcache[util.tolocal(k)] = v
370 self.branchcache[util.tolocal(k)] = v
371 return self.branchcache
371 return self.branchcache
372
372
373 def _readbranchcache(self):
373 def _readbranchcache(self):
374 partial = {}
374 partial = {}
375 try:
375 try:
376 f = self.opener("branch.cache")
376 f = self.opener("branch.cache")
377 lines = f.read().split('\n')
377 lines = f.read().split('\n')
378 f.close()
378 f.close()
379 last, lrev = lines.pop(0).split(" ", 1)
379 last, lrev = lines.pop(0).split(" ", 1)
380 last, lrev = bin(last), int(lrev)
380 last, lrev = bin(last), int(lrev)
381 if not (lrev < self.changelog.count() and
381 if not (lrev < self.changelog.count() and
382 self.changelog.node(lrev) == last): # sanity check
382 self.changelog.node(lrev) == last): # sanity check
383 # invalidate the cache
383 # invalidate the cache
384 raise ValueError('Invalid branch cache: unknown tip')
384 raise ValueError('Invalid branch cache: unknown tip')
385 for l in lines:
385 for l in lines:
386 if not l: continue
386 if not l: continue
387 node, label = l.split(" ", 1)
387 node, label = l.split(" ", 1)
388 partial[label.strip()] = bin(node)
388 partial[label.strip()] = bin(node)
389 except (KeyboardInterrupt, util.SignalInterrupt):
389 except (KeyboardInterrupt, util.SignalInterrupt):
390 raise
390 raise
391 except Exception, inst:
391 except Exception, inst:
392 if self.ui.debugflag:
392 if self.ui.debugflag:
393 self.ui.warn(str(inst), '\n')
393 self.ui.warn(str(inst), '\n')
394 partial, last, lrev = {}, nullid, nullrev
394 partial, last, lrev = {}, nullid, nullrev
395 return partial, last, lrev
395 return partial, last, lrev
396
396
397 def _writebranchcache(self, branches, tip, tiprev):
397 def _writebranchcache(self, branches, tip, tiprev):
398 try:
398 try:
399 f = self.opener("branch.cache", "w")
399 f = self.opener("branch.cache", "w")
400 f.write("%s %s\n" % (hex(tip), tiprev))
400 f.write("%s %s\n" % (hex(tip), tiprev))
401 for label, node in branches.iteritems():
401 for label, node in branches.iteritems():
402 f.write("%s %s\n" % (hex(node), label))
402 f.write("%s %s\n" % (hex(node), label))
403 except IOError:
403 except IOError:
404 pass
404 pass
405
405
406 def _updatebranchcache(self, partial, start, end):
406 def _updatebranchcache(self, partial, start, end):
407 for r in xrange(start, end):
407 for r in xrange(start, end):
408 c = self.changectx(r)
408 c = self.changectx(r)
409 b = c.branch()
409 b = c.branch()
410 partial[b] = c.node()
410 partial[b] = c.node()
411
411
412 def lookup(self, key):
412 def lookup(self, key):
413 if key == '.':
413 if key == '.':
414 key = self.dirstate.parents()[0]
414 key = self.dirstate.parents()[0]
415 if key == nullid:
415 if key == nullid:
416 raise repo.RepoError(_("no revision checked out"))
416 raise repo.RepoError(_("no revision checked out"))
417 elif key == 'null':
417 elif key == 'null':
418 return nullid
418 return nullid
419 n = self.changelog._match(key)
419 n = self.changelog._match(key)
420 if n:
420 if n:
421 return n
421 return n
422 if key in self.tags():
422 if key in self.tags():
423 return self.tags()[key]
423 return self.tags()[key]
424 if key in self.branchtags():
424 if key in self.branchtags():
425 return self.branchtags()[key]
425 return self.branchtags()[key]
426 n = self.changelog._partialmatch(key)
426 n = self.changelog._partialmatch(key)
427 if n:
427 if n:
428 return n
428 return n
429 raise repo.RepoError(_("unknown revision '%s'") % key)
429 raise repo.RepoError(_("unknown revision '%s'") % key)
430
430
431 def dev(self):
431 def dev(self):
432 return os.lstat(self.path).st_dev
432 return os.lstat(self.path).st_dev
433
433
434 def local(self):
434 def local(self):
435 return True
435 return True
436
436
437 def join(self, f):
437 def join(self, f):
438 return os.path.join(self.path, f)
438 return os.path.join(self.path, f)
439
439
440 def sjoin(self, f):
440 def sjoin(self, f):
441 f = self.encodefn(f)
441 f = self.encodefn(f)
442 return os.path.join(self.spath, f)
442 return os.path.join(self.spath, f)
443
443
444 def wjoin(self, f):
444 def wjoin(self, f):
445 return os.path.join(self.root, f)
445 return os.path.join(self.root, f)
446
446
447 def file(self, f):
447 def file(self, f):
448 if f[0] == '/':
448 if f[0] == '/':
449 f = f[1:]
449 f = f[1:]
450 return filelog.filelog(self.sopener, f, self.revlogversion)
450 return filelog.filelog(self.sopener, f, self.revlogversion)
451
451
452 def changectx(self, changeid=None):
452 def changectx(self, changeid=None):
453 return context.changectx(self, changeid)
453 return context.changectx(self, changeid)
454
454
455 def workingctx(self):
455 def workingctx(self):
456 return context.workingctx(self)
456 return context.workingctx(self)
457
457
458 def parents(self, changeid=None):
458 def parents(self, changeid=None):
459 '''
459 '''
460 get list of changectxs for parents of changeid or working directory
460 get list of changectxs for parents of changeid or working directory
461 '''
461 '''
462 if changeid is None:
462 if changeid is None:
463 pl = self.dirstate.parents()
463 pl = self.dirstate.parents()
464 else:
464 else:
465 n = self.changelog.lookup(changeid)
465 n = self.changelog.lookup(changeid)
466 pl = self.changelog.parents(n)
466 pl = self.changelog.parents(n)
467 if pl[1] == nullid:
467 if pl[1] == nullid:
468 return [self.changectx(pl[0])]
468 return [self.changectx(pl[0])]
469 return [self.changectx(pl[0]), self.changectx(pl[1])]
469 return [self.changectx(pl[0]), self.changectx(pl[1])]
470
470
471 def filectx(self, path, changeid=None, fileid=None):
471 def filectx(self, path, changeid=None, fileid=None):
472 """changeid can be a changeset revision, node, or tag.
472 """changeid can be a changeset revision, node, or tag.
473 fileid can be a file revision or node."""
473 fileid can be a file revision or node."""
474 return context.filectx(self, path, changeid, fileid)
474 return context.filectx(self, path, changeid, fileid)
475
475
476 def getcwd(self):
476 def getcwd(self):
477 return self.dirstate.getcwd()
477 return self.dirstate.getcwd()
478
478
479 def wfile(self, f, mode='r'):
479 def wfile(self, f, mode='r'):
480 return self.wopener(f, mode)
480 return self.wopener(f, mode)
481
481
482 def wread(self, filename):
482 def wread(self, filename):
483 if self.encodepats == None:
483 if self.encodepats == None:
484 l = []
484 l = []
485 for pat, cmd in self.ui.configitems("encode"):
485 for pat, cmd in self.ui.configitems("encode"):
486 mf = util.matcher(self.root, "", [pat], [], [])[1]
486 mf = util.matcher(self.root, "", [pat], [], [])[1]
487 l.append((mf, cmd))
487 l.append((mf, cmd))
488 self.encodepats = l
488 self.encodepats = l
489
489
490 data = self.wopener(filename, 'r').read()
490 data = self.wopener(filename, 'r').read()
491
491
492 for mf, cmd in self.encodepats:
492 for mf, cmd in self.encodepats:
493 if mf(filename):
493 if mf(filename):
494 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
494 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
495 data = util.filter(data, cmd)
495 data = util.filter(data, cmd)
496 break
496 break
497
497
498 return data
498 return data
499
499
500 def wwrite(self, filename, data, fd=None):
500 def wwrite(self, filename, data, fd=None):
501 if self.decodepats == None:
501 if self.decodepats == None:
502 l = []
502 l = []
503 for pat, cmd in self.ui.configitems("decode"):
503 for pat, cmd in self.ui.configitems("decode"):
504 mf = util.matcher(self.root, "", [pat], [], [])[1]
504 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 l.append((mf, cmd))
505 l.append((mf, cmd))
506 self.decodepats = l
506 self.decodepats = l
507
507
508 for mf, cmd in self.decodepats:
508 for mf, cmd in self.decodepats:
509 if mf(filename):
509 if mf(filename):
510 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
510 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
511 data = util.filter(data, cmd)
511 data = util.filter(data, cmd)
512 break
512 break
513
513
514 if fd:
514 if fd:
515 return fd.write(data)
515 return fd.write(data)
516 return self.wopener(filename, 'w').write(data)
516 return self.wopener(filename, 'w').write(data)
517
517
518 def transaction(self):
518 def transaction(self):
519 tr = self.transhandle
519 tr = self.transhandle
520 if tr != None and tr.running():
520 if tr != None and tr.running():
521 return tr.nest()
521 return tr.nest()
522
522
523 # save dirstate for rollback
523 # save dirstate for rollback
524 try:
524 try:
525 ds = self.opener("dirstate").read()
525 ds = self.opener("dirstate").read()
526 except IOError:
526 except IOError:
527 ds = ""
527 ds = ""
528 self.opener("journal.dirstate", "w").write(ds)
528 self.opener("journal.dirstate", "w").write(ds)
529
529
530 renames = [(self.sjoin("journal"), self.sjoin("undo")),
530 renames = [(self.sjoin("journal"), self.sjoin("undo")),
531 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
531 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
532 tr = transaction.transaction(self.ui.warn, self.sopener,
532 tr = transaction.transaction(self.ui.warn, self.sopener,
533 self.sjoin("journal"),
533 self.sjoin("journal"),
534 aftertrans(renames))
534 aftertrans(renames))
535 self.transhandle = tr
535 self.transhandle = tr
536 return tr
536 return tr
537
537
538 def recover(self):
538 def recover(self):
539 l = self.lock()
539 l = self.lock()
540 if os.path.exists(self.sjoin("journal")):
540 if os.path.exists(self.sjoin("journal")):
541 self.ui.status(_("rolling back interrupted transaction\n"))
541 self.ui.status(_("rolling back interrupted transaction\n"))
542 transaction.rollback(self.sopener, self.sjoin("journal"))
542 transaction.rollback(self.sopener, self.sjoin("journal"))
543 self.reload()
543 self.reload()
544 return True
544 return True
545 else:
545 else:
546 self.ui.warn(_("no interrupted transaction available\n"))
546 self.ui.warn(_("no interrupted transaction available\n"))
547 return False
547 return False
548
548
549 def rollback(self, wlock=None):
549 def rollback(self, wlock=None):
550 if not wlock:
550 if not wlock:
551 wlock = self.wlock()
551 wlock = self.wlock()
552 l = self.lock()
552 l = self.lock()
553 if os.path.exists(self.sjoin("undo")):
553 if os.path.exists(self.sjoin("undo")):
554 self.ui.status(_("rolling back last transaction\n"))
554 self.ui.status(_("rolling back last transaction\n"))
555 transaction.rollback(self.sopener, self.sjoin("undo"))
555 transaction.rollback(self.sopener, self.sjoin("undo"))
556 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
556 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
557 self.reload()
557 self.reload()
558 self.wreload()
558 self.wreload()
559 else:
559 else:
560 self.ui.warn(_("no rollback information available\n"))
560 self.ui.warn(_("no rollback information available\n"))
561
561
    def wreload(self):
        """Re-read working-directory state (the dirstate) from disk."""
        self.dirstate.read()
564
564
    def reload(self):
        """Reload changelog and manifest from disk and drop tag caches.

        Called after another process (or a rollback) may have changed
        the store behind our back; the tag caches are invalidated so
        they are lazily rebuilt on next access.
        """
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
570
570
571 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
571 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
572 desc=None):
572 desc=None):
573 try:
573 try:
574 l = lock.lock(lockname, 0, releasefn, desc=desc)
574 l = lock.lock(lockname, 0, releasefn, desc=desc)
575 except lock.LockHeld, inst:
575 except lock.LockHeld, inst:
576 if not wait:
576 if not wait:
577 raise
577 raise
578 self.ui.warn(_("waiting for lock on %s held by %r\n") %
578 self.ui.warn(_("waiting for lock on %s held by %r\n") %
579 (desc, inst.locker))
579 (desc, inst.locker))
580 # default to 600 seconds timeout
580 # default to 600 seconds timeout
581 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
581 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
582 releasefn, desc=desc)
582 releasefn, desc=desc)
583 if acquirefn:
583 if acquirefn:
584 acquirefn()
584 acquirefn()
585 return l
585 return l
586
586
    def lock(self, wait=1):
        """Acquire the store lock; reload() caches once it is held."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
590
590
    def wlock(self, wait=1):
        """Acquire the working-directory lock.

        On acquire the dirstate is re-read (wreload); on release it is
        written back to disk (dirstate.write).
        """
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
595
595
596 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
596 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
597 """
597 """
598 commit an individual file as part of a larger transaction
598 commit an individual file as part of a larger transaction
599 """
599 """
600
600
601 t = self.wread(fn)
601 t = self.wread(fn)
602 fl = self.file(fn)
602 fl = self.file(fn)
603 fp1 = manifest1.get(fn, nullid)
603 fp1 = manifest1.get(fn, nullid)
604 fp2 = manifest2.get(fn, nullid)
604 fp2 = manifest2.get(fn, nullid)
605
605
606 meta = {}
606 meta = {}
607 cp = self.dirstate.copied(fn)
607 cp = self.dirstate.copied(fn)
608 if cp:
608 if cp:
609 # Mark the new revision of this file as a copy of another
609 # Mark the new revision of this file as a copy of another
610 # file. This copy data will effectively act as a parent
610 # file. This copy data will effectively act as a parent
611 # of this new revision. If this is a merge, the first
611 # of this new revision. If this is a merge, the first
612 # parent will be the nullid (meaning "look up the copy data")
612 # parent will be the nullid (meaning "look up the copy data")
613 # and the second one will be the other parent. For example:
613 # and the second one will be the other parent. For example:
614 #
614 #
615 # 0 --- 1 --- 3 rev1 changes file foo
615 # 0 --- 1 --- 3 rev1 changes file foo
616 # \ / rev2 renames foo to bar and changes it
616 # \ / rev2 renames foo to bar and changes it
617 # \- 2 -/ rev3 should have bar with all changes and
617 # \- 2 -/ rev3 should have bar with all changes and
618 # should record that bar descends from
618 # should record that bar descends from
619 # bar in rev2 and foo in rev1
619 # bar in rev2 and foo in rev1
620 #
620 #
621 # this allows this merge to succeed:
621 # this allows this merge to succeed:
622 #
622 #
623 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
623 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
624 # \ / merging rev3 and rev4 should use bar@rev2
624 # \ / merging rev3 and rev4 should use bar@rev2
625 # \- 2 --- 4 as the merge base
625 # \- 2 --- 4 as the merge base
626 #
626 #
627 meta["copy"] = cp
627 meta["copy"] = cp
628 if not manifest2: # not a branch merge
628 if not manifest2: # not a branch merge
629 meta["copyrev"] = hex(manifest1.get(cp, nullid))
629 meta["copyrev"] = hex(manifest1.get(cp, nullid))
630 fp2 = nullid
630 fp2 = nullid
631 elif fp2 != nullid: # copied on remote side
631 elif fp2 != nullid: # copied on remote side
632 meta["copyrev"] = hex(manifest1.get(cp, nullid))
632 meta["copyrev"] = hex(manifest1.get(cp, nullid))
633 elif fp1 != nullid: # copied on local side, reversed
633 elif fp1 != nullid: # copied on local side, reversed
634 meta["copyrev"] = hex(manifest2.get(cp))
634 meta["copyrev"] = hex(manifest2.get(cp))
635 fp2 = fp1
635 fp2 = fp1
636 else: # directory rename
636 else: # directory rename
637 meta["copyrev"] = hex(manifest1.get(cp, nullid))
637 meta["copyrev"] = hex(manifest1.get(cp, nullid))
638 self.ui.debug(_(" %s: copy %s:%s\n") %
638 self.ui.debug(_(" %s: copy %s:%s\n") %
639 (fn, cp, meta["copyrev"]))
639 (fn, cp, meta["copyrev"]))
640 fp1 = nullid
640 fp1 = nullid
641 elif fp2 != nullid:
641 elif fp2 != nullid:
642 # is one parent an ancestor of the other?
642 # is one parent an ancestor of the other?
643 fpa = fl.ancestor(fp1, fp2)
643 fpa = fl.ancestor(fp1, fp2)
644 if fpa == fp1:
644 if fpa == fp1:
645 fp1, fp2 = fp2, nullid
645 fp1, fp2 = fp2, nullid
646 elif fpa == fp2:
646 elif fpa == fp2:
647 fp2 = nullid
647 fp2 = nullid
648
648
649 # is the file unmodified from the parent? report existing entry
649 # is the file unmodified from the parent? report existing entry
650 if fp2 == nullid and not fl.cmp(fp1, t):
650 if fp2 == nullid and not fl.cmp(fp1, t):
651 return fp1
651 return fp1
652
652
653 changelist.append(fn)
653 changelist.append(fn)
654 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
654 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
655
655
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files without consulting dirstate for the list.

        If no first parent is supplied, the dirstate parents are used.
        Thin wrapper around commit(); passing explicit p1/p2 switches
        commit() into its non-dirstate ("rawcommit") mode.
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
661
661
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Commit changes to the repository.

        files        -- explicit list of files to commit (None: all changes)
        text         -- commit message; an editor is run if empty or if
                        force_editor is set
        user, date   -- changeset metadata; user defaults to ui.username()
        match        -- filter applied when scanning for changes
        force        -- allow an otherwise empty commit
        lock, wlock  -- pre-acquired locks, taken here if not supplied
        p1, p2       -- explicit parents (rawcommit mode); when p1 is None
                        the dirstate parents are used
        extra        -- extra changeset metadata (copied, caller's dict is
                        not mutated)

        Returns the new changeset node, or None when nothing changed or
        the commit message came back empty.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        # copy so the caller's dict (and the shared default) stays untouched
        extra = extra.copy()

        if use_dirstate:
            if files:
                # explicit file list: classify each by dirstate status
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # no list given: commit everything status() reports
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it already points at p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is legal UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            # refuse an empty, unforced, non-merge commit that doesn't
            # even change the branch name
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                # refresh the exec bit from the working copy
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit tolerates missing files: treat as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the HG:-annotated template shown in the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
801
801
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        NOTE: the files=[] default is safe here because the list is only
        read (dict.fromkeys / passed through), never mutated.
        '''

        if node:
            # walking a committed revision: scan its manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # found: drop from the "still missing" dict
                        # (del-then-break is safe while iterating)
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # no node: delegate to the dirstate walker
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
835
835
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored/clean are only populated when list_ignored/list_clean
        are set.
        """

        def fcmp(fn, mf):
            # true if working copy of fn differs from its manifest version
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # best-effort lock: without it we just can't update the
                # dirstate for files found clean below
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record as clean so the next status is
                                # faster (only when we hold the lock)
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    # "" marks a working-dir file whose content must be
                    # compared on disk (see the mf2[fn] != "" test below)
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    # consumed: whatever remains in mf1 afterwards was removed
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
932
932
933 def add(self, list, wlock=None):
933 def add(self, list, wlock=None):
934 if not wlock:
934 if not wlock:
935 wlock = self.wlock()
935 wlock = self.wlock()
936 for f in list:
936 for f in list:
937 p = self.wjoin(f)
937 p = self.wjoin(f)
938 if not os.path.exists(p):
938 if not os.path.exists(p):
939 self.ui.warn(_("%s does not exist!\n") % f)
939 self.ui.warn(_("%s does not exist!\n") % f)
940 elif not os.path.isfile(p):
940 elif not os.path.isfile(p):
941 self.ui.warn(_("%s not added: only files supported currently\n")
941 self.ui.warn(_("%s not added: only files supported currently\n")
942 % f)
942 % f)
943 elif self.dirstate.state(f) in 'an':
943 elif self.dirstate.state(f) in 'an':
944 self.ui.warn(_("%s already tracked!\n") % f)
944 self.ui.warn(_("%s already tracked!\n") % f)
945 else:
945 else:
946 self.dirstate.update([f], "a")
946 self.dirstate.update([f], "a")
947
947
948 def forget(self, list, wlock=None):
948 def forget(self, list, wlock=None):
949 if not wlock:
949 if not wlock:
950 wlock = self.wlock()
950 wlock = self.wlock()
951 for f in list:
951 for f in list:
952 if self.dirstate.state(f) not in 'ai':
952 if self.dirstate.state(f) not in 'ai':
953 self.ui.warn(_("%s not added!\n") % f)
953 self.ui.warn(_("%s not added!\n") % f)
954 else:
954 else:
955 self.dirstate.forget([f])
955 self.dirstate.forget([f])
956
956
957 def remove(self, list, unlink=False, wlock=None):
957 def remove(self, list, unlink=False, wlock=None):
958 if unlink:
958 if unlink:
959 for f in list:
959 for f in list:
960 try:
960 try:
961 util.unlink(self.wjoin(f))
961 util.unlink(self.wjoin(f))
962 except OSError, inst:
962 except OSError, inst:
963 if inst.errno != errno.ENOENT:
963 if inst.errno != errno.ENOENT:
964 raise
964 raise
965 if not wlock:
965 if not wlock:
966 wlock = self.wlock()
966 wlock = self.wlock()
967 for f in list:
967 for f in list:
968 p = self.wjoin(f)
968 p = self.wjoin(f)
969 if os.path.exists(p):
969 if os.path.exists(p):
970 self.ui.warn(_("%s still exists!\n") % f)
970 self.ui.warn(_("%s still exists!\n") % f)
971 elif self.dirstate.state(f) == 'a':
971 elif self.dirstate.state(f) == 'a':
972 self.dirstate.forget([f])
972 self.dirstate.forget([f])
973 elif f not in self.dirstate:
973 elif f not in self.dirstate:
974 self.ui.warn(_("%s not tracked!\n") % f)
974 self.ui.warn(_("%s not tracked!\n") % f)
975 else:
975 else:
976 self.dirstate.update([f], "r")
976 self.dirstate.update([f], "r")
977
977
978 def undelete(self, list, wlock=None):
978 def undelete(self, list, wlock=None):
979 p = self.dirstate.parents()[0]
979 p = self.dirstate.parents()[0]
980 mn = self.changelog.read(p)[0]
980 mn = self.changelog.read(p)[0]
981 m = self.manifest.read(mn)
981 m = self.manifest.read(mn)
982 if not wlock:
982 if not wlock:
983 wlock = self.wlock()
983 wlock = self.wlock()
984 for f in list:
984 for f in list:
985 if self.dirstate.state(f) not in "r":
985 if self.dirstate.state(f) not in "r":
986 self.ui.warn("%s not removed!\n" % f)
986 self.ui.warn("%s not removed!\n" % f)
987 else:
987 else:
988 t = self.file(f).read(m[f])
988 t = self.file(f).read(m[f])
989 self.wwrite(f, t)
989 self.wwrite(f, t)
990 util.set_exec(self.wjoin(f), m.execf(f))
990 util.set_exec(self.wjoin(f), m.execf(f))
991 self.dirstate.update([f], "n")
991 self.dirstate.update([f], "n")
992
992
993 def copy(self, source, dest, wlock=None):
993 def copy(self, source, dest, wlock=None):
994 p = self.wjoin(dest)
994 p = self.wjoin(dest)
995 if not os.path.exists(p):
995 if not os.path.exists(p):
996 self.ui.warn(_("%s does not exist!\n") % dest)
996 self.ui.warn(_("%s does not exist!\n") % dest)
997 elif not os.path.isfile(p):
997 elif not os.path.isfile(p):
998 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
998 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
999 else:
999 else:
1000 if not wlock:
1000 if not wlock:
1001 wlock = self.wlock()
1001 wlock = self.wlock()
1002 if self.dirstate.state(dest) == '?':
1002 if self.dirstate.state(dest) == '?':
1003 self.dirstate.update([dest], "a")
1003 self.dirstate.update([dest], "a")
1004 self.dirstate.copy(source, dest)
1004 self.dirstate.copy(source, dest)
1005
1005
1006 def heads(self, start=None):
1006 def heads(self, start=None):
1007 heads = self.changelog.heads(start)
1007 heads = self.changelog.heads(start)
1008 # sort the output in rev descending order
1008 # sort the output in rev descending order
1009 heads = [(-self.changelog.rev(h), h) for h in heads]
1009 heads = [(-self.changelog.rev(h), h) for h in heads]
1010 heads.sort()
1010 heads.sort()
1011 return [n for (r, n) in heads]
1011 return [n for (r, n) in heads]
1012
1012
1013 # branchlookup returns a dict giving a list of branches for
1013 # branchlookup returns a dict giving a list of branches for
1014 # each head. A branch is defined as the tag of a node or
1014 # each head. A branch is defined as the tag of a node or
1015 # the branch of the node's parents. If a node has multiple
1015 # the branch of the node's parents. If a node has multiple
1016 # branch tags, tags are eliminated if they are visible from other
1016 # branch tags, tags are eliminated if they are visible from other
1017 # branch tags.
1017 # branch tags.
1018 #
1018 #
1019 # So, for this graph: a->b->c->d->e
1019 # So, for this graph: a->b->c->d->e
1020 # \ /
1020 # \ /
1021 # aa -----/
1021 # aa -----/
1022 # a has tag 2.6.12
1022 # a has tag 2.6.12
1023 # d has tag 2.6.13
1023 # d has tag 2.6.13
1024 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1024 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1025 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1025 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1026 # from the list.
1026 # from the list.
1027 #
1027 #
1028 # It is possible that more than one head will have the same branch tag.
1028 # It is possible that more than one head will have the same branch tag.
1029 # callers need to check the result for multiple heads under the same
1029 # callers need to check the result for multiple heads under the same
1030 # branch tag if that is a problem for them (ie checkout of a specific
1030 # branch tag if that is a problem for them (ie checkout of a specific
1031 # branch).
1031 # branch).
1032 #
1032 #
1033 # passing in a specific branch will limit the depth of the search
1033 # passing in a specific branch will limit the depth of the search
1034 # through the parents. It won't limit the branches returned in the
1034 # through the parents. It won't limit the branches returned in the
1035 # result though.
1035 # result though.
def branchlookup(self, heads=None, branch=None):
    """Map each head to the list of branch tags visible from it.

    Passing a specific *branch* limits the depth of the search through
    the parents; it does not limit the branches returned in the result
    (per the original author's note).  Returns {headnode: [tagname, ...]}.
    """
    if not heads:
        heads = self.heads()
    headt = list(heads)
    chlog = self.changelog
    branches = {}
    merges = []
    seenmerge = {}

    # traverse the tree once for each head, recording in the branches
    # dict which tags are visible from this head.  The branches
    # dict also records which tags are visible from each tag
    # while we traverse.
    while headt or merges:
        if merges:
            n, found = merges.pop()
            visit = [n]
        else:
            h = headt.pop()
            visit = [h]
            found = [h]
            # NOTE(review): 'seen' is reset only when starting from a
            # fresh head, so queued merge traversals reuse the most
            # recent head's set — preserved as-is from the original.
            seen = {}
        while visit:
            n = visit.pop()
            if n in seen:
                continue
            pp = chlog.parents(n)
            tags = self.nodetags(n)
            if tags:
                for x in tags:
                    if x == 'tip':
                        continue
                    # record this tagged node as visible from every node
                    # collected so far on this traversal, and from itself
                    for f in found:
                        branches.setdefault(f, {})[n] = 1
                    branches.setdefault(n, {})[n] = 1
                    break
                if n not in found:
                    found.append(n)
                if branch in tags:
                    continue
            seen[n] = 1
            if pp[1] != nullid and n not in seenmerge:
                # queue the second parent with a snapshot of 'found'
                merges.append((pp[1], list(found)))
                seenmerge[n] = 1
            if pp[0] != nullid:
                visit.append(pp[0])

    # traverse the branches dict, eliminating branch tags from each
    # head that are visible from another branch tag for that head.
    out = {}
    viscache = {}

    def visible(node):
        # transitive closure of 'branches' reachability, memoized in
        # viscache (shared across all heads, as in the original)
        if node in viscache:
            return viscache[node]
        ret = {}
        visit = [node]
        while visit:
            x = visit.pop()
            if x in viscache:
                ret.update(viscache[x])
            elif x not in ret:
                ret[x] = 1
                if x in branches:
                    visit.extend(branches[x].keys())
        viscache[node] = ret
        return ret

    for h in heads:
        if h not in branches:
            continue
        # O(n^2), but somewhat limited.  This only searches the
        # tags visible from a specific head, not all the tags in the
        # whole repo.
        for b in branches[h]:
            vis = False
            for bb in branches[h]:
                if b != bb and b in visible(bb):
                    vis = True
                    break
            if not vis:
                l = out.setdefault(h, [])
                l.extend(self.nodetags(b))
    return out
1118
1118
def branches(self, nodes):
    """For each node, walk first parents back to the start of its linear
    segment and return (tip, root, p1, p2) tuples describing it."""
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for start in nodes:
        cur = start
        while True:
            p1, p2 = self.changelog.parents(cur)
            # stop at a merge (two real parents) or at the repo root
            if p2 != nullid or p1 == nullid:
                result.append((start, cur, p1, p2))
                break
            cur = p1
    return result
1132
1132
def between(self, pairs):
    """For each (top, bottom) pair, walk first parents from top toward
    bottom and collect the nodes at exponentially growing distances
    (1, 2, 4, ...) from top.  Returns one list of samples per pair."""
    results = []
    for top, bottom in pairs:
        samples = []
        node, dist, nextstop = top, 0, 1
        while node != bottom:
            if dist == nextstop:
                samples.append(node)
                nextstop *= 2
            node = self.changelog.parents(node)[0]
            dist += 1
        results.append(samples)
    return results
1151
1151
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything remote has is incoming
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # queue unknown parents for the next batched request
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # ask the remote about parents in batches of 10
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            raise repo.RepoError(_("already have changeset ") + short(f[:4]))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1292
1292
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base is None:
        # no base supplied: discover the common subset ourselves
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1340
1340
def pull(self, remote, heads=None, force=False, lock=None):
    """Pull missing changesets from *remote* into this repository.

    Acquires the local lock unless the caller supplied one; returns
    addchangegroup's result, or 0 when nothing new was found.
    """
    ownlock = False
    if not lock:
        lock = self.lock()
        ownlock = True

    try:
        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        elif 'changegroupsubset' not in remote.capabilities:
            raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        # only release a lock we took out ourselves
        if ownlock:
            lock.release()
1366
1366
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to *remote*.

    Two transports exist:

    - addchangegroup assumes the local user can lock the remote
      repo (local filesystem, old ssh servers);
    - unbundle assumes the local user cannot lock the remote repo
      (new ssh servers, http servers).
    """
    if remote.capable('unbundle'):
        pusher = self.push_unbundle
    else:
        pusher = self.push_addchangegroup
    return pusher(remote, force, revs)
1379
1379
def prepush(self, remote, force, revs):
    """Compute the changegroup to push to *remote*.

    Returns (changegroup, remote_heads) on success, or (None, 1) when
    there is nothing to push or the push would create new remote heads
    and *force* is not set.
    """
    base = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        warn = False

        if remote_heads == [nullid]:
            # empty remote: cannot create extra heads
            warn = False
        elif not revs and len(heads) > len(remote_heads):
            warn = True
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        newheads.append(r)
                else:
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = True

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1435
1435
def push_addchangegroup(self, remote, force, revs):
    """Push by applying a changegroup directly to the locked remote."""
    # hold the remote lock for the duration of the push
    lock = remote.lock()

    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        # prepush found nothing to push (or refused); propagate its code
        return remote_heads
    return remote.addchangegroup(cg, 'push', self.url())
1444
1444
def push_unbundle(self, remote, force, revs):
    """Push via the unbundle protocol.

    The local repo finds heads on the server and works out which revs it
    must push.  Once the revs are transferred, if the server finds it has
    different heads (someone else won a commit/push race), it aborts.
    """
    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        # nothing to push (or refused); propagate prepush's code
        return remote_heads
    if force:
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1457
1457
def changegroupinfo(self, nodes):
    """Report how many changesets are in the group; list them in debug mode."""
    self.ui.note(_("%d changesets found\n") % len(nodes))
    if not self.ui.debugflag:
        return
    self.ui.debug(_("List of changesets:\n"))
    for node in nodes:
        self.ui.debug("%s\n" % hex(node))
1464
1464
1465 def changegroupsubset(self, bases, heads, source):
1465 def changegroupsubset(self, bases, heads, source):
1466 """This function generates a changegroup consisting of all the nodes
1466 """This function generates a changegroup consisting of all the nodes
1467 that are descendents of any of the bases, and ancestors of any of
1467 that are descendents of any of the bases, and ancestors of any of
1468 the heads.
1468 the heads.
1469
1469
1470 It is fairly complex as determining which filenodes and which
1470 It is fairly complex as determining which filenodes and which
1471 manifest nodes need to be included for the changeset to be complete
1471 manifest nodes need to be included for the changeset to be complete
1472 is non-trivial.
1472 is non-trivial.
1473
1473
1474 Another wrinkle is doing the reverse, figuring out which changeset in
1474 Another wrinkle is doing the reverse, figuring out which changeset in
1475 the changegroup a particular filenode or manifestnode belongs to."""
1475 the changegroup a particular filenode or manifestnode belongs to."""
1476
1476
1477 self.hook('preoutgoing', throw=True, source=source)
1477 self.hook('preoutgoing', throw=True, source=source)
1478
1478
1479 # Set up some initial variables
1479 # Set up some initial variables
1480 # Make it easy to refer to self.changelog
1480 # Make it easy to refer to self.changelog
1481 cl = self.changelog
1481 cl = self.changelog
1482 # msng is short for missing - compute the list of changesets in this
1482 # msng is short for missing - compute the list of changesets in this
1483 # changegroup.
1483 # changegroup.
1484 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1484 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1485 self.changegroupinfo(msng_cl_lst)
1485 self.changegroupinfo(msng_cl_lst)
1486 # Some bases may turn out to be superfluous, and some heads may be
1486 # Some bases may turn out to be superfluous, and some heads may be
1487 # too. nodesbetween will return the minimal set of bases and heads
1487 # too. nodesbetween will return the minimal set of bases and heads
1488 # necessary to re-create the changegroup.
1488 # necessary to re-create the changegroup.
1489
1489
1490 # Known heads are the list of heads that it is assumed the recipient
1490 # Known heads are the list of heads that it is assumed the recipient
1491 # of this changegroup will know about.
1491 # of this changegroup will know about.
1492 knownheads = {}
1492 knownheads = {}
1493 # We assume that all parents of bases are known heads.
1493 # We assume that all parents of bases are known heads.
1494 for n in bases:
1494 for n in bases:
1495 for p in cl.parents(n):
1495 for p in cl.parents(n):
1496 if p != nullid:
1496 if p != nullid:
1497 knownheads[p] = 1
1497 knownheads[p] = 1
1498 knownheads = knownheads.keys()
1498 knownheads = knownheads.keys()
1499 if knownheads:
1499 if knownheads:
1500 # Now that we know what heads are known, we can compute which
1500 # Now that we know what heads are known, we can compute which
1501 # changesets are known. The recipient must know about all
1501 # changesets are known. The recipient must know about all
1502 # changesets required to reach the known heads from the null
1502 # changesets required to reach the known heads from the null
1503 # changeset.
1503 # changeset.
1504 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1504 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1505 junk = None
1505 junk = None
1506 # Transform the list into an ersatz set.
1506 # Transform the list into an ersatz set.
1507 has_cl_set = dict.fromkeys(has_cl_set)
1507 has_cl_set = dict.fromkeys(has_cl_set)
1508 else:
1508 else:
1509 # If there were no known heads, the recipient cannot be assumed to
1509 # If there were no known heads, the recipient cannot be assumed to
1510 # know about any changesets.
1510 # know about any changesets.
1511 has_cl_set = {}
1511 has_cl_set = {}
1512
1512
1513 # Make it easy to refer to self.manifest
1513 # Make it easy to refer to self.manifest
1514 mnfst = self.manifest
1514 mnfst = self.manifest
1515 # We don't know which manifests are missing yet
1515 # We don't know which manifests are missing yet
1516 msng_mnfst_set = {}
1516 msng_mnfst_set = {}
1517 # Nor do we know which filenodes are missing.
1517 # Nor do we know which filenodes are missing.
1518 msng_filenode_set = {}
1518 msng_filenode_set = {}
1519
1519
1520 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1520 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1521 junk = None
1521 junk = None
1522
1522
1523 # A changeset always belongs to itself, so the changenode lookup
1523 # A changeset always belongs to itself, so the changenode lookup
1524 # function for a changenode is identity.
1524 # function for a changenode is identity.
1525 def identity(x):
1525 def identity(x):
1526 return x
1526 return x
1527
1527
1528 # A function generating function. Sets up an environment for the
1528 # A function generating function. Sets up an environment for the
1529 # inner function.
1529 # inner function.
1530 def cmp_by_rev_func(revlog):
1530 def cmp_by_rev_func(revlog):
1531 # Compare two nodes by their revision number in the environment's
1531 # Compare two nodes by their revision number in the environment's
1532 # revision history. Since the revision number both represents the
1532 # revision history. Since the revision number both represents the
1533 # most efficient order to read the nodes in, and represents a
1533 # most efficient order to read the nodes in, and represents a
1534 # topological sorting of the nodes, this function is often useful.
1534 # topological sorting of the nodes, this function is often useful.
1535 def cmp_by_rev(a, b):
1535 def cmp_by_rev(a, b):
1536 return cmp(revlog.rev(a), revlog.rev(b))
1536 return cmp(revlog.rev(a), revlog.rev(b))
1537 return cmp_by_rev
1537 return cmp_by_rev
1538
1538
1539 # If we determine that a particular file or manifest node must be a
1539 # If we determine that a particular file or manifest node must be a
1540 # node that the recipient of the changegroup will already have, we can
1540 # node that the recipient of the changegroup will already have, we can
1541 # also assume the recipient will have all the parents. This function
1541 # also assume the recipient will have all the parents. This function
1542 # prunes them from the set of missing nodes.
1542 # prunes them from the set of missing nodes.
1543 def prune_parents(revlog, hasset, msngset):
1543 def prune_parents(revlog, hasset, msngset):
1544 haslst = hasset.keys()
1544 haslst = hasset.keys()
1545 haslst.sort(cmp_by_rev_func(revlog))
1545 haslst.sort(cmp_by_rev_func(revlog))
1546 for node in haslst:
1546 for node in haslst:
1547 parentlst = [p for p in revlog.parents(node) if p != nullid]
1547 parentlst = [p for p in revlog.parents(node) if p != nullid]
1548 while parentlst:
1548 while parentlst:
1549 n = parentlst.pop()
1549 n = parentlst.pop()
1550 if n not in hasset:
1550 if n not in hasset:
1551 hasset[n] = 1
1551 hasset[n] = 1
1552 p = [p for p in revlog.parents(n) if p != nullid]
1552 p = [p for p in revlog.parents(n) if p != nullid]
1553 parentlst.extend(p)
1553 parentlst.extend(p)
1554 for n in hasset:
1554 for n in hasset:
1555 msngset.pop(n, None)
1555 msngset.pop(n, None)
1556
1556
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_msng(clnode):
                # read the changeset entry; c[0] is its manifest node,
                # c[3] its list of changed files
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                # first changenode seen referencing this manifest "owns" it
                msng_mnfst_set.setdefault(c[0], clnode)
            # NOTE: keep the inner function's original name so tracebacks
            # stay recognizable
            collect_msng.__name__ = 'collect_manifests_and_files'
            return collect_msng
1578
1578
        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            # let prune_parents drop the known manifests (and whatever
            # ancestors it marks as known) out of msng_mnfst_set
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1592
1592
        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            # msng_mnfst_set maps manifest node -> owning changelog node
            return msng_mnfst_set[mnfstnode]
1597
1597
        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # single-element list so the inner closure can rebind the value
            # (Python 2 has no 'nonlocal')
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line;
                        # manifest lines are '<file>\0<hex filenode><flags>'
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next, so the cheap
                # delta path above can be taken for sequential revs.
                next_rev[0] = r + 1
            return collect_msng_filenodes
1647
1647
        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            # drop the known filenodes (and whatever ancestors prune_parents
            # marks as known) out of the missing set
            prune_parents(filerevlog, hasset, msngset)
1661
1661
        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            # bind the per-file dict once so each lookup is a single
            # dictionary access
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
1670
1670
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    # a file group starts with a chunk naming the file
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
1731
1731
        if msng_cl_lst:
            # fire the 'outgoing' hook with the first changeset being sent
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        # wrap the chunk generator in a buffering file-like object
        return util.chunkbuffer(gengroup())
1736
1736
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: nodes whose descendants (exclusive of what the recipient
        has) should be bundled.  source: tag passed through to the
        'preoutgoing'/'outgoing' hooks.  Returns a chunkbuffer wrapping the
        chunk generator.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset from basenodes onward is outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # revision numbers of the outgoing changesets, as a dict for
        # O(1) membership tests in gennodelst below
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(revlog):
            # yield the nodes of this revlog whose linked changeset is in
            # the outgoing revset
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # closure that records every file touched by an outgoing
            # changeset (c[3] is the changeset's changed-files list)
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # closure mapping a node of the given revlog back to the
            # changelog node that introduced it
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changesets first; changedfiles is filled in as a side effect
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifests referenced by the outgoing changesets
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file; files with no outgoing
            # filenodes are skipped entirely
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so we can test emptiness before naming the file
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups are left
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1803
1803
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: file-like object yielding changegroup chunks.
        srctype/url: passed through to the hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # progress callback while adding changesets; returns the rev
            # number the next changeset will get
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last rev before/after the group is added
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each file group is introduced by a chunk holding its name;
                # an empty chunk terminates the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the appendfile changelog to its real location
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                         % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still abort the transaction (throw=True)
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook invocation per added changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1910
1910
1911
1911
1912 def stream_in(self, remote):
1912 def stream_in(self, remote):
1913 fp = remote.stream_out()
1913 fp = remote.stream_out()
1914 l = fp.readline()
1914 l = fp.readline()
1915 try:
1915 try:
1916 resp = int(l)
1916 resp = int(l)
1917 except ValueError:
1917 except ValueError:
1918 raise util.UnexpectedOutput(
1918 raise util.UnexpectedOutput(
1919 _('Unexpected response from remote server:'), l)
1919 _('Unexpected response from remote server:'), l)
1920 if resp == 1:
1920 if resp == 1:
1921 raise util.Abort(_('operation forbidden by server'))
1921 raise util.Abort(_('operation forbidden by server'))
1922 elif resp == 2:
1922 elif resp == 2:
1923 raise util.Abort(_('locking the remote repository failed'))
1923 raise util.Abort(_('locking the remote repository failed'))
1924 elif resp != 0:
1924 elif resp != 0:
1925 raise util.Abort(_('the server sent an unknown error code'))
1925 raise util.Abort(_('the server sent an unknown error code'))
1926 self.ui.status(_('streaming all changes\n'))
1926 self.ui.status(_('streaming all changes\n'))
1927 l = fp.readline()
1927 l = fp.readline()
1928 try:
1928 try:
1929 total_files, total_bytes = map(int, l.split(' ', 1))
1929 total_files, total_bytes = map(int, l.split(' ', 1))
1930 except ValueError, TypeError:
1930 except ValueError, TypeError:
1931 raise util.UnexpectedOutput(
1931 raise util.UnexpectedOutput(
1932 _('Unexpected response from remote server:'), l)
1932 _('Unexpected response from remote server:'), l)
1933 self.ui.status(_('%d files to transfer, %s of data\n') %
1933 self.ui.status(_('%d files to transfer, %s of data\n') %
1934 (total_files, util.bytecount(total_bytes)))
1934 (total_files, util.bytecount(total_bytes)))
1935 start = time.time()
1935 start = time.time()
1936 for i in xrange(total_files):
1936 for i in xrange(total_files):
1937 # XXX doesn't support '\n' or '\r' in filenames
1937 # XXX doesn't support '\n' or '\r' in filenames
1938 l = fp.readline()
1938 l = fp.readline()
1939 try:
1939 try:
1940 name, size = l.split('\0', 1)
1940 name, size = l.split('\0', 1)
1941 size = int(size)
1941 size = int(size)
1942 except ValueError, TypeError:
1942 except ValueError, TypeError:
1943 raise util.UnexpectedOutput(
1943 raise util.UnexpectedOutput(
1944 _('Unexpected response from remote server:'), l)
1944 _('Unexpected response from remote server:'), l)
1945 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1945 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1946 ofp = self.sopener(name, 'w')
1946 ofp = self.sopener(name, 'w')
1947 for chunk in util.filechunkiter(fp, limit=size):
1947 for chunk in util.filechunkiter(fp, limit=size):
1948 ofp.write(chunk)
1948 ofp.write(chunk)
1949 ofp.close()
1949 ofp.close()
1950 elapsed = time.time() - start
1950 elapsed = time.time() - start
1951 if elapsed <= 0:
1951 if elapsed <= 0:
1952 elapsed = 0.001
1952 elapsed = 0.001
1953 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1953 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1954 (util.bytecount(total_bytes), elapsed,
1954 (util.bytecount(total_bytes), elapsed,
1955 util.bytecount(total_bytes / elapsed)))
1955 util.bytecount(total_bytes / elapsed)))
1956 self.reload()
1956 self.reload()
1957 return len(self.heads()) + 1
1957 return len(self.heads()) + 1
1958
1958
1959 def clone(self, remote, heads=[], stream=False):
1959 def clone(self, remote, heads=[], stream=False):
1960 '''clone remote repository.
1960 '''clone remote repository.
1961
1961
1962 keyword arguments:
1962 keyword arguments:
1963 heads: list of revs to clone (forces use of pull)
1963 heads: list of revs to clone (forces use of pull)
1964 stream: use streaming clone if possible'''
1964 stream: use streaming clone if possible'''
1965
1965
1966 # now, all clients that can request uncompressed clones can
1966 # now, all clients that can request uncompressed clones can
1967 # read repo formats supported by all servers that can serve
1967 # read repo formats supported by all servers that can serve
1968 # them.
1968 # them.
1969
1969
1970 # if revlog format changes, client will have to check version
1970 # if revlog format changes, client will have to check version
1971 # and format flags on "stream" capability, and use
1971 # and format flags on "stream" capability, and use
1972 # uncompressed only if compatible.
1972 # uncompressed only if compatible.
1973
1973
1974 if stream and not heads and remote.capable('stream'):
1974 if stream and not heads and remote.capable('stream'):
1975 return self.stream_in(remote)
1975 return self.stream_in(remote)
1976 return self.pull(remote, heads)
1976 return self.pull(remote, heads)
1977
1977
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a zero-argument callback that performs the queued renames.

    Each entry in files is a (src, dest) pair; the pairs are snapshotted
    as tuples up front so later mutation of the caller's list has no
    effect on the callback.
    """
    pending = [tuple(pair) for pair in files]

    def runrenames():
        for src, dest in pending:
            util.rename(src, dest)
    return runrenames
1985
1985
def instance(ui, path, create):
    """Instantiate a localrepository for *path*, stripping any file: scheme."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1988
1988
def islocal(path):
    """Report whether *path* is local; for this repository type it always is."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now