##// END OF EJS Templates
status: use contexts
Matt Mackall -
r6769:97c12b1e default
parent child Browse files
Show More
@@ -1,2102 +1,2093 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15
15
16 class localrepository(repo.repository):
16 class localrepository(repo.repository):
17 capabilities = util.set(('lookup', 'changegroupsubset'))
17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 supported = ('revlogv1', 'store')
18 supported = ('revlogv1', 'store')
19
19
20 def __init__(self, parentui, path=None, create=0):
20 def __init__(self, parentui, path=None, create=0):
21 repo.repository.__init__(self)
21 repo.repository.__init__(self)
22 self.root = os.path.realpath(path)
22 self.root = os.path.realpath(path)
23 self.path = os.path.join(self.root, ".hg")
23 self.path = os.path.join(self.root, ".hg")
24 self.origroot = path
24 self.origroot = path
25 self.opener = util.opener(self.path)
25 self.opener = util.opener(self.path)
26 self.wopener = util.opener(self.root)
26 self.wopener = util.opener(self.root)
27
27
28 if not os.path.isdir(self.path):
28 if not os.path.isdir(self.path):
29 if create:
29 if create:
30 if not os.path.exists(path):
30 if not os.path.exists(path):
31 os.mkdir(path)
31 os.mkdir(path)
32 os.mkdir(self.path)
32 os.mkdir(self.path)
33 requirements = ["revlogv1"]
33 requirements = ["revlogv1"]
34 if parentui.configbool('format', 'usestore', True):
34 if parentui.configbool('format', 'usestore', True):
35 os.mkdir(os.path.join(self.path, "store"))
35 os.mkdir(os.path.join(self.path, "store"))
36 requirements.append("store")
36 requirements.append("store")
37 # create an invalid changelog
37 # create an invalid changelog
38 self.opener("00changelog.i", "a").write(
38 self.opener("00changelog.i", "a").write(
39 '\0\0\0\2' # represents revlogv2
39 '\0\0\0\2' # represents revlogv2
40 ' dummy changelog to prevent using the old repo layout'
40 ' dummy changelog to prevent using the old repo layout'
41 )
41 )
42 reqfile = self.opener("requires", "w")
42 reqfile = self.opener("requires", "w")
43 for r in requirements:
43 for r in requirements:
44 reqfile.write("%s\n" % r)
44 reqfile.write("%s\n" % r)
45 reqfile.close()
45 reqfile.close()
46 else:
46 else:
47 raise repo.RepoError(_("repository %s not found") % path)
47 raise repo.RepoError(_("repository %s not found") % path)
48 elif create:
48 elif create:
49 raise repo.RepoError(_("repository %s already exists") % path)
49 raise repo.RepoError(_("repository %s already exists") % path)
50 else:
50 else:
51 # find requirements
51 # find requirements
52 try:
52 try:
53 requirements = self.opener("requires").read().splitlines()
53 requirements = self.opener("requires").read().splitlines()
54 except IOError, inst:
54 except IOError, inst:
55 if inst.errno != errno.ENOENT:
55 if inst.errno != errno.ENOENT:
56 raise
56 raise
57 requirements = []
57 requirements = []
58 # check them
58 # check them
59 for r in requirements:
59 for r in requirements:
60 if r not in self.supported:
60 if r not in self.supported:
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62
62
63 # setup store
63 # setup store
64 if "store" in requirements:
64 if "store" in requirements:
65 self.encodefn = util.encodefilename
65 self.encodefn = util.encodefilename
66 self.decodefn = util.decodefilename
66 self.decodefn = util.decodefilename
67 self.spath = os.path.join(self.path, "store")
67 self.spath = os.path.join(self.path, "store")
68 else:
68 else:
69 self.encodefn = lambda x: x
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
71 self.spath = self.path
72
72
73 try:
73 try:
74 # files in .hg/ will be created using this mode
74 # files in .hg/ will be created using this mode
75 mode = os.stat(self.spath).st_mode
75 mode = os.stat(self.spath).st_mode
76 # avoid some useless chmods
76 # avoid some useless chmods
77 if (0777 & ~util._umask) == (0777 & mode):
77 if (0777 & ~util._umask) == (0777 & mode):
78 mode = None
78 mode = None
79 except OSError:
79 except OSError:
80 mode = None
80 mode = None
81
81
82 self._createmode = mode
82 self._createmode = mode
83 self.opener.createmode = mode
83 self.opener.createmode = mode
84 sopener = util.opener(self.spath)
84 sopener = util.opener(self.spath)
85 sopener.createmode = mode
85 sopener.createmode = mode
86 self.sopener = util.encodedopener(sopener, self.encodefn)
86 self.sopener = util.encodedopener(sopener, self.encodefn)
87
87
88 self.ui = ui.ui(parentui=parentui)
88 self.ui = ui.ui(parentui=parentui)
89 try:
89 try:
90 self.ui.readconfig(self.join("hgrc"), self.root)
90 self.ui.readconfig(self.join("hgrc"), self.root)
91 extensions.loadall(self.ui)
91 extensions.loadall(self.ui)
92 except IOError:
92 except IOError:
93 pass
93 pass
94
94
95 self.tagscache = None
95 self.tagscache = None
96 self._tagstypecache = None
96 self._tagstypecache = None
97 self.branchcache = None
97 self.branchcache = None
98 self._ubranchcache = None # UTF-8 version of branchcache
98 self._ubranchcache = None # UTF-8 version of branchcache
99 self._branchcachetip = None
99 self._branchcachetip = None
100 self.nodetagscache = None
100 self.nodetagscache = None
101 self.filterpats = {}
101 self.filterpats = {}
102 self._datafilters = {}
102 self._datafilters = {}
103 self._transref = self._lockref = self._wlockref = None
103 self._transref = self._lockref = self._wlockref = None
104
104
105 def __getattr__(self, name):
105 def __getattr__(self, name):
106 if name == 'changelog':
106 if name == 'changelog':
107 self.changelog = changelog.changelog(self.sopener)
107 self.changelog = changelog.changelog(self.sopener)
108 self.sopener.defversion = self.changelog.version
108 self.sopener.defversion = self.changelog.version
109 return self.changelog
109 return self.changelog
110 if name == 'manifest':
110 if name == 'manifest':
111 self.changelog
111 self.changelog
112 self.manifest = manifest.manifest(self.sopener)
112 self.manifest = manifest.manifest(self.sopener)
113 return self.manifest
113 return self.manifest
114 if name == 'dirstate':
114 if name == 'dirstate':
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 return self.dirstate
116 return self.dirstate
117 else:
117 else:
118 raise AttributeError, name
118 raise AttributeError, name
119
119
120 def __getitem__(self, changeid):
120 def __getitem__(self, changeid):
121 if changeid == None:
121 if changeid == None:
122 return context.workingctx(self)
122 return context.workingctx(self)
123 return context.changectx(self, changeid)
123 return context.changectx(self, changeid)
124
124
125 def __nonzero__(self):
125 def __nonzero__(self):
126 return True
126 return True
127
127
128 def __len__(self):
128 def __len__(self):
129 return len(self.changelog)
129 return len(self.changelog)
130
130
131 def __iter__(self):
131 def __iter__(self):
132 for i in xrange(len(self)):
132 for i in xrange(len(self)):
133 yield i
133 yield i
134
134
135 def url(self):
135 def url(self):
136 return 'file:' + self.root
136 return 'file:' + self.root
137
137
138 def hook(self, name, throw=False, **args):
138 def hook(self, name, throw=False, **args):
139 return hook.hook(self.ui, self, name, throw, **args)
139 return hook.hook(self.ui, self, name, throw, **args)
140
140
141 tag_disallowed = ':\r\n'
141 tag_disallowed = ':\r\n'
142
142
143 def _tag(self, names, node, message, local, user, date, parent=None,
143 def _tag(self, names, node, message, local, user, date, parent=None,
144 extra={}):
144 extra={}):
145 use_dirstate = parent is None
145 use_dirstate = parent is None
146
146
147 if isinstance(names, str):
147 if isinstance(names, str):
148 allchars = names
148 allchars = names
149 names = (names,)
149 names = (names,)
150 else:
150 else:
151 allchars = ''.join(names)
151 allchars = ''.join(names)
152 for c in self.tag_disallowed:
152 for c in self.tag_disallowed:
153 if c in allchars:
153 if c in allchars:
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
155
155
156 for name in names:
156 for name in names:
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
158 local=local)
158 local=local)
159
159
160 def writetags(fp, names, munge, prevtags):
160 def writetags(fp, names, munge, prevtags):
161 fp.seek(0, 2)
161 fp.seek(0, 2)
162 if prevtags and prevtags[-1] != '\n':
162 if prevtags and prevtags[-1] != '\n':
163 fp.write('\n')
163 fp.write('\n')
164 for name in names:
164 for name in names:
165 m = munge and munge(name) or name
165 m = munge and munge(name) or name
166 if self._tagstypecache and name in self._tagstypecache:
166 if self._tagstypecache and name in self._tagstypecache:
167 old = self.tagscache.get(name, nullid)
167 old = self.tagscache.get(name, nullid)
168 fp.write('%s %s\n' % (hex(old), m))
168 fp.write('%s %s\n' % (hex(old), m))
169 fp.write('%s %s\n' % (hex(node), m))
169 fp.write('%s %s\n' % (hex(node), m))
170 fp.close()
170 fp.close()
171
171
172 prevtags = ''
172 prevtags = ''
173 if local:
173 if local:
174 try:
174 try:
175 fp = self.opener('localtags', 'r+')
175 fp = self.opener('localtags', 'r+')
176 except IOError, err:
176 except IOError, err:
177 fp = self.opener('localtags', 'a')
177 fp = self.opener('localtags', 'a')
178 else:
178 else:
179 prevtags = fp.read()
179 prevtags = fp.read()
180
180
181 # local tags are stored in the current charset
181 # local tags are stored in the current charset
182 writetags(fp, names, None, prevtags)
182 writetags(fp, names, None, prevtags)
183 for name in names:
183 for name in names:
184 self.hook('tag', node=hex(node), tag=name, local=local)
184 self.hook('tag', node=hex(node), tag=name, local=local)
185 return
185 return
186
186
187 if use_dirstate:
187 if use_dirstate:
188 try:
188 try:
189 fp = self.wfile('.hgtags', 'rb+')
189 fp = self.wfile('.hgtags', 'rb+')
190 except IOError, err:
190 except IOError, err:
191 fp = self.wfile('.hgtags', 'ab')
191 fp = self.wfile('.hgtags', 'ab')
192 else:
192 else:
193 prevtags = fp.read()
193 prevtags = fp.read()
194 else:
194 else:
195 try:
195 try:
196 prevtags = self.filectx('.hgtags', parent).data()
196 prevtags = self.filectx('.hgtags', parent).data()
197 except revlog.LookupError:
197 except revlog.LookupError:
198 pass
198 pass
199 fp = self.wfile('.hgtags', 'wb')
199 fp = self.wfile('.hgtags', 'wb')
200 if prevtags:
200 if prevtags:
201 fp.write(prevtags)
201 fp.write(prevtags)
202
202
203 # committed tags are stored in UTF-8
203 # committed tags are stored in UTF-8
204 writetags(fp, names, util.fromlocal, prevtags)
204 writetags(fp, names, util.fromlocal, prevtags)
205
205
206 if use_dirstate and '.hgtags' not in self.dirstate:
206 if use_dirstate and '.hgtags' not in self.dirstate:
207 self.add(['.hgtags'])
207 self.add(['.hgtags'])
208
208
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
210 extra=extra)
210 extra=extra)
211
211
212 for name in names:
212 for name in names:
213 self.hook('tag', node=hex(node), tag=name, local=local)
213 self.hook('tag', node=hex(node), tag=name, local=local)
214
214
215 return tagnode
215 return tagnode
216
216
217 def tag(self, names, node, message, local, user, date):
217 def tag(self, names, node, message, local, user, date):
218 '''tag a revision with one or more symbolic names.
218 '''tag a revision with one or more symbolic names.
219
219
220 names is a list of strings or, when adding a single tag, names may be a
220 names is a list of strings or, when adding a single tag, names may be a
221 string.
221 string.
222
222
223 if local is True, the tags are stored in a per-repository file.
223 if local is True, the tags are stored in a per-repository file.
224 otherwise, they are stored in the .hgtags file, and a new
224 otherwise, they are stored in the .hgtags file, and a new
225 changeset is committed with the change.
225 changeset is committed with the change.
226
226
227 keyword arguments:
227 keyword arguments:
228
228
229 local: whether to store tags in non-version-controlled file
229 local: whether to store tags in non-version-controlled file
230 (default False)
230 (default False)
231
231
232 message: commit message to use if committing
232 message: commit message to use if committing
233
233
234 user: name of user to use if committing
234 user: name of user to use if committing
235
235
236 date: date tuple to use if committing'''
236 date: date tuple to use if committing'''
237
237
238 for x in self.status()[:5]:
238 for x in self.status()[:5]:
239 if '.hgtags' in x:
239 if '.hgtags' in x:
240 raise util.Abort(_('working copy of .hgtags is changed '
240 raise util.Abort(_('working copy of .hgtags is changed '
241 '(please commit .hgtags manually)'))
241 '(please commit .hgtags manually)'))
242
242
243 self._tag(names, node, message, local, user, date)
243 self._tag(names, node, message, local, user, date)
244
244
245 def tags(self):
245 def tags(self):
246 '''return a mapping of tag to node'''
246 '''return a mapping of tag to node'''
247 if self.tagscache:
247 if self.tagscache:
248 return self.tagscache
248 return self.tagscache
249
249
250 globaltags = {}
250 globaltags = {}
251 tagtypes = {}
251 tagtypes = {}
252
252
253 def readtags(lines, fn, tagtype):
253 def readtags(lines, fn, tagtype):
254 filetags = {}
254 filetags = {}
255 count = 0
255 count = 0
256
256
257 def warn(msg):
257 def warn(msg):
258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
259
259
260 for l in lines:
260 for l in lines:
261 count += 1
261 count += 1
262 if not l:
262 if not l:
263 continue
263 continue
264 s = l.split(" ", 1)
264 s = l.split(" ", 1)
265 if len(s) != 2:
265 if len(s) != 2:
266 warn(_("cannot parse entry"))
266 warn(_("cannot parse entry"))
267 continue
267 continue
268 node, key = s
268 node, key = s
269 key = util.tolocal(key.strip()) # stored in UTF-8
269 key = util.tolocal(key.strip()) # stored in UTF-8
270 try:
270 try:
271 bin_n = bin(node)
271 bin_n = bin(node)
272 except TypeError:
272 except TypeError:
273 warn(_("node '%s' is not well formed") % node)
273 warn(_("node '%s' is not well formed") % node)
274 continue
274 continue
275 if bin_n not in self.changelog.nodemap:
275 if bin_n not in self.changelog.nodemap:
276 warn(_("tag '%s' refers to unknown node") % key)
276 warn(_("tag '%s' refers to unknown node") % key)
277 continue
277 continue
278
278
279 h = []
279 h = []
280 if key in filetags:
280 if key in filetags:
281 n, h = filetags[key]
281 n, h = filetags[key]
282 h.append(n)
282 h.append(n)
283 filetags[key] = (bin_n, h)
283 filetags[key] = (bin_n, h)
284
284
285 for k, nh in filetags.items():
285 for k, nh in filetags.items():
286 if k not in globaltags:
286 if k not in globaltags:
287 globaltags[k] = nh
287 globaltags[k] = nh
288 tagtypes[k] = tagtype
288 tagtypes[k] = tagtype
289 continue
289 continue
290
290
291 # we prefer the global tag if:
291 # we prefer the global tag if:
292 # it supercedes us OR
292 # it supercedes us OR
293 # mutual supercedes and it has a higher rank
293 # mutual supercedes and it has a higher rank
294 # otherwise we win because we're tip-most
294 # otherwise we win because we're tip-most
295 an, ah = nh
295 an, ah = nh
296 bn, bh = globaltags[k]
296 bn, bh = globaltags[k]
297 if (bn != an and an in bh and
297 if (bn != an and an in bh and
298 (bn not in ah or len(bh) > len(ah))):
298 (bn not in ah or len(bh) > len(ah))):
299 an = bn
299 an = bn
300 ah.extend([n for n in bh if n not in ah])
300 ah.extend([n for n in bh if n not in ah])
301 globaltags[k] = an, ah
301 globaltags[k] = an, ah
302 tagtypes[k] = tagtype
302 tagtypes[k] = tagtype
303
303
304 # read the tags file from each head, ending with the tip
304 # read the tags file from each head, ending with the tip
305 f = None
305 f = None
306 for rev, node, fnode in self._hgtagsnodes():
306 for rev, node, fnode in self._hgtagsnodes():
307 f = (f and f.filectx(fnode) or
307 f = (f and f.filectx(fnode) or
308 self.filectx('.hgtags', fileid=fnode))
308 self.filectx('.hgtags', fileid=fnode))
309 readtags(f.data().splitlines(), f, "global")
309 readtags(f.data().splitlines(), f, "global")
310
310
311 try:
311 try:
312 data = util.fromlocal(self.opener("localtags").read())
312 data = util.fromlocal(self.opener("localtags").read())
313 # localtags are stored in the local character set
313 # localtags are stored in the local character set
314 # while the internal tag table is stored in UTF-8
314 # while the internal tag table is stored in UTF-8
315 readtags(data.splitlines(), "localtags", "local")
315 readtags(data.splitlines(), "localtags", "local")
316 except IOError:
316 except IOError:
317 pass
317 pass
318
318
319 self.tagscache = {}
319 self.tagscache = {}
320 self._tagstypecache = {}
320 self._tagstypecache = {}
321 for k,nh in globaltags.items():
321 for k,nh in globaltags.items():
322 n = nh[0]
322 n = nh[0]
323 if n != nullid:
323 if n != nullid:
324 self.tagscache[k] = n
324 self.tagscache[k] = n
325 self._tagstypecache[k] = tagtypes[k]
325 self._tagstypecache[k] = tagtypes[k]
326 self.tagscache['tip'] = self.changelog.tip()
326 self.tagscache['tip'] = self.changelog.tip()
327 return self.tagscache
327 return self.tagscache
328
328
329 def tagtype(self, tagname):
329 def tagtype(self, tagname):
330 '''
330 '''
331 return the type of the given tag. result can be:
331 return the type of the given tag. result can be:
332
332
333 'local' : a local tag
333 'local' : a local tag
334 'global' : a global tag
334 'global' : a global tag
335 None : tag does not exist
335 None : tag does not exist
336 '''
336 '''
337
337
338 self.tags()
338 self.tags()
339
339
340 return self._tagstypecache.get(tagname)
340 return self._tagstypecache.get(tagname)
341
341
342 def _hgtagsnodes(self):
342 def _hgtagsnodes(self):
343 heads = self.heads()
343 heads = self.heads()
344 heads.reverse()
344 heads.reverse()
345 last = {}
345 last = {}
346 ret = []
346 ret = []
347 for node in heads:
347 for node in heads:
348 c = self[node]
348 c = self[node]
349 rev = c.rev()
349 rev = c.rev()
350 try:
350 try:
351 fnode = c.filenode('.hgtags')
351 fnode = c.filenode('.hgtags')
352 except revlog.LookupError:
352 except revlog.LookupError:
353 continue
353 continue
354 ret.append((rev, node, fnode))
354 ret.append((rev, node, fnode))
355 if fnode in last:
355 if fnode in last:
356 ret[last[fnode]] = None
356 ret[last[fnode]] = None
357 last[fnode] = len(ret) - 1
357 last[fnode] = len(ret) - 1
358 return [item for item in ret if item]
358 return [item for item in ret if item]
359
359
360 def tagslist(self):
360 def tagslist(self):
361 '''return a list of tags ordered by revision'''
361 '''return a list of tags ordered by revision'''
362 l = []
362 l = []
363 for t, n in self.tags().items():
363 for t, n in self.tags().items():
364 try:
364 try:
365 r = self.changelog.rev(n)
365 r = self.changelog.rev(n)
366 except:
366 except:
367 r = -2 # sort to the beginning of the list if unknown
367 r = -2 # sort to the beginning of the list if unknown
368 l.append((r, t, n))
368 l.append((r, t, n))
369 return [(t, n) for r, t, n in util.sort(l)]
369 return [(t, n) for r, t, n in util.sort(l)]
370
370
371 def nodetags(self, node):
371 def nodetags(self, node):
372 '''return the tags associated with a node'''
372 '''return the tags associated with a node'''
373 if not self.nodetagscache:
373 if not self.nodetagscache:
374 self.nodetagscache = {}
374 self.nodetagscache = {}
375 for t, n in self.tags().items():
375 for t, n in self.tags().items():
376 self.nodetagscache.setdefault(n, []).append(t)
376 self.nodetagscache.setdefault(n, []).append(t)
377 return self.nodetagscache.get(node, [])
377 return self.nodetagscache.get(node, [])
378
378
379 def _branchtags(self, partial, lrev):
379 def _branchtags(self, partial, lrev):
380 tiprev = len(self) - 1
380 tiprev = len(self) - 1
381 if lrev != tiprev:
381 if lrev != tiprev:
382 self._updatebranchcache(partial, lrev+1, tiprev+1)
382 self._updatebranchcache(partial, lrev+1, tiprev+1)
383 self._writebranchcache(partial, self.changelog.tip(), tiprev)
383 self._writebranchcache(partial, self.changelog.tip(), tiprev)
384
384
385 return partial
385 return partial
386
386
387 def branchtags(self):
387 def branchtags(self):
388 tip = self.changelog.tip()
388 tip = self.changelog.tip()
389 if self.branchcache is not None and self._branchcachetip == tip:
389 if self.branchcache is not None and self._branchcachetip == tip:
390 return self.branchcache
390 return self.branchcache
391
391
392 oldtip = self._branchcachetip
392 oldtip = self._branchcachetip
393 self._branchcachetip = tip
393 self._branchcachetip = tip
394 if self.branchcache is None:
394 if self.branchcache is None:
395 self.branchcache = {} # avoid recursion in changectx
395 self.branchcache = {} # avoid recursion in changectx
396 else:
396 else:
397 self.branchcache.clear() # keep using the same dict
397 self.branchcache.clear() # keep using the same dict
398 if oldtip is None or oldtip not in self.changelog.nodemap:
398 if oldtip is None or oldtip not in self.changelog.nodemap:
399 partial, last, lrev = self._readbranchcache()
399 partial, last, lrev = self._readbranchcache()
400 else:
400 else:
401 lrev = self.changelog.rev(oldtip)
401 lrev = self.changelog.rev(oldtip)
402 partial = self._ubranchcache
402 partial = self._ubranchcache
403
403
404 self._branchtags(partial, lrev)
404 self._branchtags(partial, lrev)
405
405
406 # the branch cache is stored on disk as UTF-8, but in the local
406 # the branch cache is stored on disk as UTF-8, but in the local
407 # charset internally
407 # charset internally
408 for k, v in partial.items():
408 for k, v in partial.items():
409 self.branchcache[util.tolocal(k)] = v
409 self.branchcache[util.tolocal(k)] = v
410 self._ubranchcache = partial
410 self._ubranchcache = partial
411 return self.branchcache
411 return self.branchcache
412
412
413 def _readbranchcache(self):
413 def _readbranchcache(self):
414 partial = {}
414 partial = {}
415 try:
415 try:
416 f = self.opener("branch.cache")
416 f = self.opener("branch.cache")
417 lines = f.read().split('\n')
417 lines = f.read().split('\n')
418 f.close()
418 f.close()
419 except (IOError, OSError):
419 except (IOError, OSError):
420 return {}, nullid, nullrev
420 return {}, nullid, nullrev
421
421
422 try:
422 try:
423 last, lrev = lines.pop(0).split(" ", 1)
423 last, lrev = lines.pop(0).split(" ", 1)
424 last, lrev = bin(last), int(lrev)
424 last, lrev = bin(last), int(lrev)
425 if lrev >= len(self) or self[lrev].node() != last:
425 if lrev >= len(self) or self[lrev].node() != last:
426 # invalidate the cache
426 # invalidate the cache
427 raise ValueError('invalidating branch cache (tip differs)')
427 raise ValueError('invalidating branch cache (tip differs)')
428 for l in lines:
428 for l in lines:
429 if not l: continue
429 if not l: continue
430 node, label = l.split(" ", 1)
430 node, label = l.split(" ", 1)
431 partial[label.strip()] = bin(node)
431 partial[label.strip()] = bin(node)
432 except (KeyboardInterrupt, util.SignalInterrupt):
432 except (KeyboardInterrupt, util.SignalInterrupt):
433 raise
433 raise
434 except Exception, inst:
434 except Exception, inst:
435 if self.ui.debugflag:
435 if self.ui.debugflag:
436 self.ui.warn(str(inst), '\n')
436 self.ui.warn(str(inst), '\n')
437 partial, last, lrev = {}, nullid, nullrev
437 partial, last, lrev = {}, nullid, nullrev
438 return partial, last, lrev
438 return partial, last, lrev
439
439
440 def _writebranchcache(self, branches, tip, tiprev):
440 def _writebranchcache(self, branches, tip, tiprev):
441 try:
441 try:
442 f = self.opener("branch.cache", "w", atomictemp=True)
442 f = self.opener("branch.cache", "w", atomictemp=True)
443 f.write("%s %s\n" % (hex(tip), tiprev))
443 f.write("%s %s\n" % (hex(tip), tiprev))
444 for label, node in branches.iteritems():
444 for label, node in branches.iteritems():
445 f.write("%s %s\n" % (hex(node), label))
445 f.write("%s %s\n" % (hex(node), label))
446 f.rename()
446 f.rename()
447 except (IOError, OSError):
447 except (IOError, OSError):
448 pass
448 pass
449
449
450 def _updatebranchcache(self, partial, start, end):
450 def _updatebranchcache(self, partial, start, end):
451 for r in xrange(start, end):
451 for r in xrange(start, end):
452 c = self[r]
452 c = self[r]
453 b = c.branch()
453 b = c.branch()
454 partial[b] = c.node()
454 partial[b] = c.node()
455
455
456 def lookup(self, key):
456 def lookup(self, key):
457 if key == '.':
457 if key == '.':
458 return self.dirstate.parents()[0]
458 return self.dirstate.parents()[0]
459 elif key == 'null':
459 elif key == 'null':
460 return nullid
460 return nullid
461 n = self.changelog._match(key)
461 n = self.changelog._match(key)
462 if n:
462 if n:
463 return n
463 return n
464 if key in self.tags():
464 if key in self.tags():
465 return self.tags()[key]
465 return self.tags()[key]
466 if key in self.branchtags():
466 if key in self.branchtags():
467 return self.branchtags()[key]
467 return self.branchtags()[key]
468 n = self.changelog._partialmatch(key)
468 n = self.changelog._partialmatch(key)
469 if n:
469 if n:
470 return n
470 return n
471 try:
471 try:
472 if len(key) == 20:
472 if len(key) == 20:
473 key = hex(key)
473 key = hex(key)
474 except:
474 except:
475 pass
475 pass
476 raise repo.RepoError(_("unknown revision '%s'") % key)
476 raise repo.RepoError(_("unknown revision '%s'") % key)
477
477
478 def local(self):
478 def local(self):
479 return True
479 return True
480
480
481 def join(self, f):
481 def join(self, f):
482 return os.path.join(self.path, f)
482 return os.path.join(self.path, f)
483
483
484 def sjoin(self, f):
484 def sjoin(self, f):
485 f = self.encodefn(f)
485 f = self.encodefn(f)
486 return os.path.join(self.spath, f)
486 return os.path.join(self.spath, f)
487
487
488 def wjoin(self, f):
488 def wjoin(self, f):
489 return os.path.join(self.root, f)
489 return os.path.join(self.root, f)
490
490
491 def rjoin(self, f):
491 def rjoin(self, f):
492 return os.path.join(self.root, util.pconvert(f))
492 return os.path.join(self.root, util.pconvert(f))
493
493
494 def file(self, f):
494 def file(self, f):
495 if f[0] == '/':
495 if f[0] == '/':
496 f = f[1:]
496 f = f[1:]
497 return filelog.filelog(self.sopener, f)
497 return filelog.filelog(self.sopener, f)
498
498
499 def changectx(self, changeid):
499 def changectx(self, changeid):
500 return self[changeid]
500 return self[changeid]
501
501
502 def parents(self, changeid=None):
502 def parents(self, changeid=None):
503 '''get list of changectxs for parents of changeid'''
503 '''get list of changectxs for parents of changeid'''
504 return self[changeid].parents()
504 return self[changeid].parents()
505
505
506 def filectx(self, path, changeid=None, fileid=None):
506 def filectx(self, path, changeid=None, fileid=None):
507 """changeid can be a changeset revision, node, or tag.
507 """changeid can be a changeset revision, node, or tag.
508 fileid can be a file revision or node."""
508 fileid can be a file revision or node."""
509 return context.filectx(self, path, changeid, fileid)
509 return context.filectx(self, path, changeid, fileid)
510
510
511 def getcwd(self):
511 def getcwd(self):
512 return self.dirstate.getcwd()
512 return self.dirstate.getcwd()
513
513
514 def pathto(self, f, cwd=None):
514 def pathto(self, f, cwd=None):
515 return self.dirstate.pathto(f, cwd)
515 return self.dirstate.pathto(f, cwd)
516
516
517 def wfile(self, f, mode='r'):
517 def wfile(self, f, mode='r'):
518 return self.wopener(f, mode)
518 return self.wopener(f, mode)
519
519
520 def _link(self, f):
520 def _link(self, f):
521 return os.path.islink(self.wjoin(f))
521 return os.path.islink(self.wjoin(f))
522
522
523 def _filter(self, filter, filename, data):
523 def _filter(self, filter, filename, data):
524 if filter not in self.filterpats:
524 if filter not in self.filterpats:
525 l = []
525 l = []
526 for pat, cmd in self.ui.configitems(filter):
526 for pat, cmd in self.ui.configitems(filter):
527 mf = util.matcher(self.root, "", [pat], [], [])[1]
527 mf = util.matcher(self.root, "", [pat], [], [])[1]
528 fn = None
528 fn = None
529 params = cmd
529 params = cmd
530 for name, filterfn in self._datafilters.iteritems():
530 for name, filterfn in self._datafilters.iteritems():
531 if cmd.startswith(name):
531 if cmd.startswith(name):
532 fn = filterfn
532 fn = filterfn
533 params = cmd[len(name):].lstrip()
533 params = cmd[len(name):].lstrip()
534 break
534 break
535 if not fn:
535 if not fn:
536 fn = lambda s, c, **kwargs: util.filter(s, c)
536 fn = lambda s, c, **kwargs: util.filter(s, c)
537 # Wrap old filters not supporting keyword arguments
537 # Wrap old filters not supporting keyword arguments
538 if not inspect.getargspec(fn)[2]:
538 if not inspect.getargspec(fn)[2]:
539 oldfn = fn
539 oldfn = fn
540 fn = lambda s, c, **kwargs: oldfn(s, c)
540 fn = lambda s, c, **kwargs: oldfn(s, c)
541 l.append((mf, fn, params))
541 l.append((mf, fn, params))
542 self.filterpats[filter] = l
542 self.filterpats[filter] = l
543
543
544 for mf, fn, cmd in self.filterpats[filter]:
544 for mf, fn, cmd in self.filterpats[filter]:
545 if mf(filename):
545 if mf(filename):
546 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
546 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
547 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
547 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
548 break
548 break
549
549
550 return data
550 return data
551
551
552 def adddatafilter(self, name, filter):
552 def adddatafilter(self, name, filter):
553 self._datafilters[name] = filter
553 self._datafilters[name] = filter
554
554
555 def wread(self, filename):
555 def wread(self, filename):
556 if self._link(filename):
556 if self._link(filename):
557 data = os.readlink(self.wjoin(filename))
557 data = os.readlink(self.wjoin(filename))
558 else:
558 else:
559 data = self.wopener(filename, 'r').read()
559 data = self.wopener(filename, 'r').read()
560 return self._filter("encode", filename, data)
560 return self._filter("encode", filename, data)
561
561
562 def wwrite(self, filename, data, flags):
562 def wwrite(self, filename, data, flags):
563 data = self._filter("decode", filename, data)
563 data = self._filter("decode", filename, data)
564 try:
564 try:
565 os.unlink(self.wjoin(filename))
565 os.unlink(self.wjoin(filename))
566 except OSError:
566 except OSError:
567 pass
567 pass
568 self.wopener(filename, 'w').write(data)
568 self.wopener(filename, 'w').write(data)
569 util.set_flags(self.wjoin(filename), flags)
569 util.set_flags(self.wjoin(filename), flags)
570
570
    def wwritedata(self, filename, data):
        # Like wwrite(), but only apply the "decode" filters and return
        # the result; the working directory is not touched.
        return self._filter("decode", filename, data)
573
573
    def transaction(self):
        """Return a new store transaction, nesting inside any live one.

        Saves the dirstate and current branch for rollback, then opens
        a journal in the store; when the transaction closes, the journal
        files are renamed to the undo.* files used by rollback().
        Raises repo.RepoError if a journal already exists (interrupted
        transaction needing 'hg recover').
        """
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repo): save an empty one
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on successful close, the journal files become the undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        # weakref: an abandoned transaction is aborted when collected
        self._transref = weakref.ref(tr)
        return tr
599
599
600 def recover(self):
600 def recover(self):
601 l = self.lock()
601 l = self.lock()
602 try:
602 try:
603 if os.path.exists(self.sjoin("journal")):
603 if os.path.exists(self.sjoin("journal")):
604 self.ui.status(_("rolling back interrupted transaction\n"))
604 self.ui.status(_("rolling back interrupted transaction\n"))
605 transaction.rollback(self.sopener, self.sjoin("journal"))
605 transaction.rollback(self.sopener, self.sjoin("journal"))
606 self.invalidate()
606 self.invalidate()
607 return True
607 return True
608 else:
608 else:
609 self.ui.warn(_("no interrupted transaction available\n"))
609 self.ui.warn(_("no interrupted transaction available\n"))
610 return False
610 return False
611 finally:
611 finally:
612 del l
612 del l
613
613
    def rollback(self):
        """Undo the last transaction, restoring dirstate and branch name."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing (older journal): leave branch alone
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                # drop cached revlogs and dirstate so they are reread
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
636
636
637 def invalidate(self):
637 def invalidate(self):
638 for a in "changelog manifest".split():
638 for a in "changelog manifest".split():
639 if a in self.__dict__:
639 if a in self.__dict__:
640 delattr(self, a)
640 delattr(self, a)
641 self.tagscache = None
641 self.tagscache = None
642 self._tagstypecache = None
642 self._tagstypecache = None
643 self.nodetagscache = None
643 self.nodetagscache = None
644 self.branchcache = None
644 self.branchcache = None
645 self._ubranchcache = None
645 self._ubranchcache = None
646 self._branchcachetip = None
646 self._branchcachetip = None
647
647
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file 'lockname'.

        First tries a non-blocking acquire; if the lock is held and
        'wait' is true, warns about the holder and retries with the
        ui-configured timeout (default 600 seconds), otherwise re-raises
        LockHeld.  'acquirefn' runs once the lock is taken; 'releasefn'
        is passed through to the lock object.  Returns the lock.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
662
662
663 def lock(self, wait=True):
663 def lock(self, wait=True):
664 if self._lockref and self._lockref():
664 if self._lockref and self._lockref():
665 return self._lockref()
665 return self._lockref()
666
666
667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
668 _('repository %s') % self.origroot)
668 _('repository %s') % self.origroot)
669 self._lockref = weakref.ref(l)
669 self._lockref = weakref.ref(l)
670 return l
670 return l
671
671
672 def wlock(self, wait=True):
672 def wlock(self, wait=True):
673 if self._wlockref and self._wlockref():
673 if self._wlockref and self._wlockref():
674 return self._wlockref()
674 return self._wlockref()
675
675
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
677 self.dirstate.invalidate, _('working directory of %s') %
678 self.origroot)
678 self.origroot)
679 self._wlockref = weakref.ref(l)
679 self._wlockref = weakref.ref(l)
680 return l
680 return l
681
681
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: file context to commit; manifest1/manifest2: parent
        manifests; linkrev: changelog revision this filelog entry will
        link to; tr: transaction (proxy); changelist: list to append the
        filename to when a new filelog revision is actually created.
        Returns the filelog node for the file (existing or new).
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            cp = cp[0]
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1[cp])
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1[cp])
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2[cp])
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1[cp])
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
745
745
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        # Commit the given files against explicit parents without
        # consulting the dirstate (import/debug path); defaults to the
        # dirstate parents when p1 is not given.
        # NOTE(review): 'extra' has a mutable default shared across
        # calls; callers must not mutate it.
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)
751
751
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit changes to the repository.

        With p1 unset this is a normal dirstate-driven commit; with p1
        set (the rawcommit path) the listed files are committed against
        the explicit parents.  Returns the new changeset node (via
        _commitctx) or None when nothing changed.
        NOTE(review): 'extra' has a mutable default shared across calls;
        callers must not mutate it.
        """
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                # a partial commit of a merge would lose the other
                # parent's changes for the unselected files
                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    # explicit file list: classify by dirstate status
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                # only move dirstate parents if we commit on top of them
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock
796
796
    def commitctx(self, ctx):
        # Commit a pre-built context object directly, bypassing the
        # dirstate entirely (force=True, empty_ok=True, no dirstate
        # reads or updates).  Returns the new changeset node.
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock
807
807
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                  use_dirstate=True, update_dirstate=True):
        """Core commit: write filelogs, manifest and changelog for wctx.

        Assumes the caller holds both wlock and lock.  Returns the new
        changeset node, or None when there is nothing to commit.  The
        dirstate is only saved when the whole commit succeeds ('valid').
        """
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                # bail out early when there is literally nothing to do
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            # a proxy keeps filelogs from pinning the transaction alive
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # rawcommit path: treat an unreadable file as removed
                        remove.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            # normalize the message: strip trailing whitespace and
            # leading blank lines
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr
937
937
    def walk(self, match, node=None):
        '''Walk the working directory (node=None) or the given changeset,
        yielding all file names accepted by the match function.'''
        return self[node].walk(match)
945
945
946 def status(self, node1=None, node2=None, match=None,
946 def status(self, node1='.', node2=None, match=None,
947 ignored=False, clean=False, unknown=False):
947 ignored=False, clean=False, unknown=False):
948 """return status of files between two nodes or node and working directory
948 """return status of files between two nodes or node and working directory
949
949
950 If node1 is None, use the first dirstate parent instead.
950 If node1 is None, use the first dirstate parent instead.
951 If node2 is None, compare node1 with working directory.
951 If node2 is None, compare node1 with working directory.
952 """
952 """
953
953
954 def fcmp(fn, getnode):
954 def mfmatches(ctx):
955 t1 = self.wread(fn)
955 mf = ctx.manifest().copy()
956 return self.file(fn).cmp(getnode(fn), t1)
956 for fn in mf:
957
958 def mfmatches(node):
959 change = self.changelog.read(node)
960 mf = self.manifest.read(change[0]).copy()
961 for fn in mf.keys():
962 if not match(fn):
957 if not match(fn):
963 del mf[fn]
958 del mf[fn]
964 return mf
959 return mf
965
960
966 if not match:
961 if not match:
967 match = match_.always(self.root, self.getcwd())
962 match = match_.always(self.root, self.getcwd())
968
963
964 ctx1 = self[node1]
965 ctx2 = self[node2]
966 working = ctx2 == self[None]
967 parentworking = working and ctx1 == self['.']
968
969 listignored, listclean, listunknown = ignored, clean, unknown
969 listignored, listclean, listunknown = ignored, clean, unknown
970 modified, added, removed, deleted, unknown = [], [], [], [], []
970 modified, added, removed, deleted, unknown = [], [], [], [], []
971 ignored, clean = [], []
971 ignored, clean = [], []
972
972
973 compareworking = False
973 if not parentworking:
974 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
975 compareworking = True
976
977 if not compareworking:
978 # read the manifest from node1 before the manifest from node2,
974 # read the manifest from node1 before the manifest from node2,
979 # so that we'll hit the manifest cache if we're going through
975 # so that we'll hit the manifest cache if we're going through
980 # all the revisions in parent->child order.
976 # all the revisions in parent->child order.
981 mf1 = mfmatches(node1)
977 mf1 = mfmatches(ctx1)
982
978
983 # are we comparing the working directory?
979 # are we comparing the working directory?
984 if not node2:
980 if working:
985 (lookup, modified, added, removed, deleted, unknown,
981 (lookup, modified, added, removed, deleted, unknown,
986 ignored, clean) = self.dirstate.status(match, listignored,
982 ignored, clean) = self.dirstate.status(match, listignored,
987 listclean, listunknown)
983 listclean, listunknown)
988 # are we comparing working dir against its parent?
984 # are we comparing working dir against its parent?
989 if compareworking:
985 if parentworking:
990 if lookup:
986 if lookup:
991 fixup = []
987 fixup = []
992 # do a full compare of any files that might have changed
988 # do a full compare of any files that might have changed
993 ctx = self['.']
994 ff = self.dirstate.flagfunc(ctx.flags)
995 for f in lookup:
989 for f in lookup:
996 if (f not in ctx or ff(f) != ctx.flags(f)
990 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
997 or ctx[f].cmp(self.wread(f))):
991 or ctx1[f].cmp(ctx2[f].read())):
998 modified.append(f)
992 modified.append(f)
999 else:
993 else:
1000 fixup.append(f)
994 fixup.append(f)
1001 if listclean:
995 if listclean:
1002 clean.append(f)
996 clean.append(f)
1003
997
1004 # update dirstate for files that are actually clean
998 # update dirstate for files that are actually clean
1005 if fixup:
999 if fixup:
1006 wlock = None
1000 wlock = None
1007 try:
1001 try:
1008 try:
1002 try:
1009 wlock = self.wlock(False)
1003 wlock = self.wlock(False)
1010 except lock.LockException:
1004 except lock.LockException:
1011 pass
1005 pass
1012 if wlock:
1006 if wlock:
1013 for f in fixup:
1007 for f in fixup:
1014 self.dirstate.normal(f)
1008 self.dirstate.normal(f)
1015 finally:
1009 finally:
1016 del wlock
1010 del wlock
1017 else:
1011 else:
1018 # we are comparing working dir against non-parent
1012 # we are comparing working dir against non-parent
1019 # generate a pseudo-manifest for the working dir
1013 # generate a pseudo-manifest for the working dir
1020 # XXX: create it in dirstate.py ?
1014 # XXX: create it in dirstate.py ?
1021 mf2 = mfmatches(self.dirstate.parents()[0])
1015 mf2 = mfmatches(self['.'])
1022 ff = self.dirstate.flagfunc(mf2.flags)
1023 for f in lookup + modified + added:
1016 for f in lookup + modified + added:
1024 mf2[f] = ""
1017 mf2[f] = None
1025 mf2.set(f, ff(f))
1018 mf2.set(f, ctx2.flags(f))
1026 for f in removed:
1019 for f in removed:
1027 if f in mf2:
1020 if f in mf2:
1028 del mf2[f]
1021 del mf2[f]
1029
1030 else:
1022 else:
1031 # we are comparing two revisions
1023 # we are comparing two revisions
1032 mf2 = mfmatches(node2)
1024 mf2 = mfmatches(ctx2)
1033
1025
1034 if not compareworking:
1026 if not parentworking:
1035 # flush lists from dirstate before comparing manifests
1027 # flush lists from dirstate before comparing manifests
1036 modified, added, clean = [], [], []
1028 modified, added, clean = [], [], []
1037
1029
1038 # make sure to sort the files so we talk to the disk in a
1030 # make sure to sort the files so we talk to the disk in a
1039 # reasonable order
1031 # reasonable order
1040 getnode = lambda fn: mf1.get(fn, nullid)
1041 for fn in util.sort(mf2):
1032 for fn in util.sort(mf2):
1042 if fn in mf1:
1033 if fn in mf1:
1043 if (mf1.flags(fn) != mf2.flags(fn) or
1034 if (mf1.flags(fn) != mf2.flags(fn) or
1044 (mf1[fn] != mf2[fn] and
1035 (mf1[fn] != mf2[fn] and
1045 (mf2[fn] != "" or fcmp(fn, getnode)))):
1036 (mf2[fn] or ctx1[f].cmp(ctx2[f].read())))):
1046 modified.append(fn)
1037 modified.append(fn)
1047 elif listclean:
1038 elif listclean:
1048 clean.append(fn)
1039 clean.append(fn)
1049 del mf1[fn]
1040 del mf1[fn]
1050 else:
1041 else:
1051 added.append(fn)
1042 added.append(fn)
1052
1043
1053 removed = mf1.keys()
1044 removed = mf1.keys()
1054
1045
1055 # sort and return results:
1046 # sort and return results:
1056 for l in modified, added, removed, deleted, unknown, ignored, clean:
1047 for l in modified, added, removed, deleted, unknown, ignored, clean:
1057 l.sort()
1048 l.sort()
1058 return (modified, added, removed, deleted, unknown, ignored, clean)
1049 return (modified, added, removed, deleted, unknown, ignored, clean)
1059
1050
1060 def add(self, list):
1051 def add(self, list):
1061 wlock = self.wlock()
1052 wlock = self.wlock()
1062 try:
1053 try:
1063 rejected = []
1054 rejected = []
1064 for f in list:
1055 for f in list:
1065 p = self.wjoin(f)
1056 p = self.wjoin(f)
1066 try:
1057 try:
1067 st = os.lstat(p)
1058 st = os.lstat(p)
1068 except:
1059 except:
1069 self.ui.warn(_("%s does not exist!\n") % f)
1060 self.ui.warn(_("%s does not exist!\n") % f)
1070 rejected.append(f)
1061 rejected.append(f)
1071 continue
1062 continue
1072 if st.st_size > 10000000:
1063 if st.st_size > 10000000:
1073 self.ui.warn(_("%s: files over 10MB may cause memory and"
1064 self.ui.warn(_("%s: files over 10MB may cause memory and"
1074 " performance problems\n"
1065 " performance problems\n"
1075 "(use 'hg revert %s' to unadd the file)\n")
1066 "(use 'hg revert %s' to unadd the file)\n")
1076 % (f, f))
1067 % (f, f))
1077 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1068 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1078 self.ui.warn(_("%s not added: only files and symlinks "
1069 self.ui.warn(_("%s not added: only files and symlinks "
1079 "supported currently\n") % f)
1070 "supported currently\n") % f)
1080 rejected.append(p)
1071 rejected.append(p)
1081 elif self.dirstate[f] in 'amn':
1072 elif self.dirstate[f] in 'amn':
1082 self.ui.warn(_("%s already tracked!\n") % f)
1073 self.ui.warn(_("%s already tracked!\n") % f)
1083 elif self.dirstate[f] == 'r':
1074 elif self.dirstate[f] == 'r':
1084 self.dirstate.normallookup(f)
1075 self.dirstate.normallookup(f)
1085 else:
1076 else:
1086 self.dirstate.add(f)
1077 self.dirstate.add(f)
1087 return rejected
1078 return rejected
1088 finally:
1079 finally:
1089 del wlock
1080 del wlock
1090
1081
1091 def forget(self, list):
1082 def forget(self, list):
1092 wlock = self.wlock()
1083 wlock = self.wlock()
1093 try:
1084 try:
1094 for f in list:
1085 for f in list:
1095 if self.dirstate[f] != 'a':
1086 if self.dirstate[f] != 'a':
1096 self.ui.warn(_("%s not added!\n") % f)
1087 self.ui.warn(_("%s not added!\n") % f)
1097 else:
1088 else:
1098 self.dirstate.forget(f)
1089 self.dirstate.forget(f)
1099 finally:
1090 finally:
1100 del wlock
1091 del wlock
1101
1092
    def remove(self, list, unlink=False):
        """Mark the listed files as removed in the dirstate.

        With unlink=True the files are first deleted from the working
        directory (deleting before taking the wlock lets the removal
        proceed even while the dirstate is briefly unavailable).
        """
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        # a file that is already gone is fine
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # the unlink above did not take (e.g. recreated since)
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: just drop the pending add
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1124
1115
1125 def undelete(self, list):
1116 def undelete(self, list):
1126 wlock = None
1117 wlock = None
1127 try:
1118 try:
1128 manifests = [self.manifest.read(self.changelog.read(p)[0])
1119 manifests = [self.manifest.read(self.changelog.read(p)[0])
1129 for p in self.dirstate.parents() if p != nullid]
1120 for p in self.dirstate.parents() if p != nullid]
1130 wlock = self.wlock()
1121 wlock = self.wlock()
1131 for f in list:
1122 for f in list:
1132 if self.dirstate[f] != 'r':
1123 if self.dirstate[f] != 'r':
1133 self.ui.warn("%s not removed!\n" % f)
1124 self.ui.warn("%s not removed!\n" % f)
1134 else:
1125 else:
1135 m = f in manifests[0] and manifests[0] or manifests[1]
1126 m = f in manifests[0] and manifests[0] or manifests[1]
1136 t = self.file(f).read(m[f])
1127 t = self.file(f).read(m[f])
1137 self.wwrite(f, t, m.flags(f))
1128 self.wwrite(f, t, m.flags(f))
1138 self.dirstate.normal(f)
1129 self.dirstate.normal(f)
1139 finally:
1130 finally:
1140 del wlock
1131 del wlock
1141
1132
1142 def copy(self, source, dest):
1133 def copy(self, source, dest):
1143 wlock = None
1134 wlock = None
1144 try:
1135 try:
1145 p = self.wjoin(dest)
1136 p = self.wjoin(dest)
1146 if not (os.path.exists(p) or os.path.islink(p)):
1137 if not (os.path.exists(p) or os.path.islink(p)):
1147 self.ui.warn(_("%s does not exist!\n") % dest)
1138 self.ui.warn(_("%s does not exist!\n") % dest)
1148 elif not (os.path.isfile(p) or os.path.islink(p)):
1139 elif not (os.path.isfile(p) or os.path.islink(p)):
1149 self.ui.warn(_("copy failed: %s is not a file or a "
1140 self.ui.warn(_("copy failed: %s is not a file or a "
1150 "symbolic link\n") % dest)
1141 "symbolic link\n") % dest)
1151 else:
1142 else:
1152 wlock = self.wlock()
1143 wlock = self.wlock()
1153 if dest not in self.dirstate:
1144 if dest not in self.dirstate:
1154 self.dirstate.add(dest)
1145 self.dirstate.add(dest)
1155 self.dirstate.copy(source, dest)
1146 self.dirstate.copy(source, dest)
1156 finally:
1147 finally:
1157 del wlock
1148 del wlock
1158
1149
1159 def heads(self, start=None):
1150 def heads(self, start=None):
1160 heads = self.changelog.heads(start)
1151 heads = self.changelog.heads(start)
1161 # sort the output in rev descending order
1152 # sort the output in rev descending order
1162 heads = [(-self.changelog.rev(h), h) for h in heads]
1153 heads = [(-self.changelog.rev(h), h) for h in heads]
1163 return [n for (r, n) in util.sort(heads)]
1154 return [n for (r, n) in util.sort(heads)]
1164
1155
1165 def branchheads(self, branch=None, start=None):
1156 def branchheads(self, branch=None, start=None):
1166 if branch is None:
1157 if branch is None:
1167 branch = self[None].branch()
1158 branch = self[None].branch()
1168 branches = self.branchtags()
1159 branches = self.branchtags()
1169 if branch not in branches:
1160 if branch not in branches:
1170 return []
1161 return []
1171 # The basic algorithm is this:
1162 # The basic algorithm is this:
1172 #
1163 #
1173 # Start from the branch tip since there are no later revisions that can
1164 # Start from the branch tip since there are no later revisions that can
1174 # possibly be in this branch, and the tip is a guaranteed head.
1165 # possibly be in this branch, and the tip is a guaranteed head.
1175 #
1166 #
1176 # Remember the tip's parents as the first ancestors, since these by
1167 # Remember the tip's parents as the first ancestors, since these by
1177 # definition are not heads.
1168 # definition are not heads.
1178 #
1169 #
1179 # Step backwards from the brach tip through all the revisions. We are
1170 # Step backwards from the brach tip through all the revisions. We are
1180 # guaranteed by the rules of Mercurial that we will now be visiting the
1171 # guaranteed by the rules of Mercurial that we will now be visiting the
1181 # nodes in reverse topological order (children before parents).
1172 # nodes in reverse topological order (children before parents).
1182 #
1173 #
1183 # If a revision is one of the ancestors of a head then we can toss it
1174 # If a revision is one of the ancestors of a head then we can toss it
1184 # out of the ancestors set (we've already found it and won't be
1175 # out of the ancestors set (we've already found it and won't be
1185 # visiting it again) and put its parents in the ancestors set.
1176 # visiting it again) and put its parents in the ancestors set.
1186 #
1177 #
1187 # Otherwise, if a revision is in the branch it's another head, since it
1178 # Otherwise, if a revision is in the branch it's another head, since it
1188 # wasn't in the ancestor list of an existing head. So add it to the
1179 # wasn't in the ancestor list of an existing head. So add it to the
1189 # head list, and add its parents to the ancestor list.
1180 # head list, and add its parents to the ancestor list.
1190 #
1181 #
1191 # If it is not in the branch ignore it.
1182 # If it is not in the branch ignore it.
1192 #
1183 #
1193 # Once we have a list of heads, use nodesbetween to filter out all the
1184 # Once we have a list of heads, use nodesbetween to filter out all the
1194 # heads that cannot be reached from startrev. There may be a more
1185 # heads that cannot be reached from startrev. There may be a more
1195 # efficient way to do this as part of the previous algorithm.
1186 # efficient way to do this as part of the previous algorithm.
1196
1187
1197 set = util.set
1188 set = util.set
1198 heads = [self.changelog.rev(branches[branch])]
1189 heads = [self.changelog.rev(branches[branch])]
1199 # Don't care if ancestors contains nullrev or not.
1190 # Don't care if ancestors contains nullrev or not.
1200 ancestors = set(self.changelog.parentrevs(heads[0]))
1191 ancestors = set(self.changelog.parentrevs(heads[0]))
1201 for rev in xrange(heads[0] - 1, nullrev, -1):
1192 for rev in xrange(heads[0] - 1, nullrev, -1):
1202 if rev in ancestors:
1193 if rev in ancestors:
1203 ancestors.update(self.changelog.parentrevs(rev))
1194 ancestors.update(self.changelog.parentrevs(rev))
1204 ancestors.remove(rev)
1195 ancestors.remove(rev)
1205 elif self[rev].branch() == branch:
1196 elif self[rev].branch() == branch:
1206 heads.append(rev)
1197 heads.append(rev)
1207 ancestors.update(self.changelog.parentrevs(rev))
1198 ancestors.update(self.changelog.parentrevs(rev))
1208 heads = [self.changelog.node(rev) for rev in heads]
1199 heads = [self.changelog.node(rev) for rev in heads]
1209 if start is not None:
1200 if start is not None:
1210 heads = self.changelog.nodesbetween([start], heads)[2]
1201 heads = self.changelog.nodesbetween([start], heads)[2]
1211 return heads
1202 return heads
1212
1203
1213 def branches(self, nodes):
1204 def branches(self, nodes):
1214 if not nodes:
1205 if not nodes:
1215 nodes = [self.changelog.tip()]
1206 nodes = [self.changelog.tip()]
1216 b = []
1207 b = []
1217 for n in nodes:
1208 for n in nodes:
1218 t = n
1209 t = n
1219 while 1:
1210 while 1:
1220 p = self.changelog.parents(n)
1211 p = self.changelog.parents(n)
1221 if p[1] != nullid or p[0] == nullid:
1212 if p[1] != nullid or p[0] == nullid:
1222 b.append((t, n, p[0], p[1]))
1213 b.append((t, n, p[0], p[1]))
1223 break
1214 break
1224 n = p[0]
1215 n = p[0]
1225 return b
1216 return b
1226
1217
1227 def between(self, pairs):
1218 def between(self, pairs):
1228 r = []
1219 r = []
1229
1220
1230 for top, bottom in pairs:
1221 for top, bottom in pairs:
1231 n, l, i = top, [], 0
1222 n, l, i = top, [], 0
1232 f = 1
1223 f = 1
1233
1224
1234 while n != bottom:
1225 while n != bottom:
1235 p = self.changelog.parents(n)[0]
1226 p = self.changelog.parents(n)[0]
1236 if i == f:
1227 if i == f:
1237 l.append(n)
1228 l.append(n)
1238 f = f * 2
1229 f = f * 2
1239 n = p
1230 n = p
1240 i += 1
1231 i += 1
1241
1232
1242 r.append(l)
1233 r.append(l)
1243
1234
1244 return r
1235 return r
1245
1236
1246 def findincoming(self, remote, base=None, heads=None, force=False):
1237 def findincoming(self, remote, base=None, heads=None, force=False):
1247 """Return list of roots of the subsets of missing nodes from remote
1238 """Return list of roots of the subsets of missing nodes from remote
1248
1239
1249 If base dict is specified, assume that these nodes and their parents
1240 If base dict is specified, assume that these nodes and their parents
1250 exist on the remote side and that no child of a node of base exists
1241 exist on the remote side and that no child of a node of base exists
1251 in both remote and self.
1242 in both remote and self.
1252 Furthermore base will be updated to include the nodes that exists
1243 Furthermore base will be updated to include the nodes that exists
1253 in self and remote but no children exists in self and remote.
1244 in self and remote but no children exists in self and remote.
1254 If a list of heads is specified, return only nodes which are heads
1245 If a list of heads is specified, return only nodes which are heads
1255 or ancestors of these heads.
1246 or ancestors of these heads.
1256
1247
1257 All the ancestors of base are in self and in remote.
1248 All the ancestors of base are in self and in remote.
1258 All the descendants of the list returned are missing in self.
1249 All the descendants of the list returned are missing in self.
1259 (and so we know that the rest of the nodes are missing in remote, see
1250 (and so we know that the rest of the nodes are missing in remote, see
1260 outgoing)
1251 outgoing)
1261 """
1252 """
1262 m = self.changelog.nodemap
1253 m = self.changelog.nodemap
1263 search = []
1254 search = []
1264 fetch = {}
1255 fetch = {}
1265 seen = {}
1256 seen = {}
1266 seenbranch = {}
1257 seenbranch = {}
1267 if base == None:
1258 if base == None:
1268 base = {}
1259 base = {}
1269
1260
1270 if not heads:
1261 if not heads:
1271 heads = remote.heads()
1262 heads = remote.heads()
1272
1263
1273 if self.changelog.tip() == nullid:
1264 if self.changelog.tip() == nullid:
1274 base[nullid] = 1
1265 base[nullid] = 1
1275 if heads != [nullid]:
1266 if heads != [nullid]:
1276 return [nullid]
1267 return [nullid]
1277 return []
1268 return []
1278
1269
1279 # assume we're closer to the tip than the root
1270 # assume we're closer to the tip than the root
1280 # and start by examining the heads
1271 # and start by examining the heads
1281 self.ui.status(_("searching for changes\n"))
1272 self.ui.status(_("searching for changes\n"))
1282
1273
1283 unknown = []
1274 unknown = []
1284 for h in heads:
1275 for h in heads:
1285 if h not in m:
1276 if h not in m:
1286 unknown.append(h)
1277 unknown.append(h)
1287 else:
1278 else:
1288 base[h] = 1
1279 base[h] = 1
1289
1280
1290 if not unknown:
1281 if not unknown:
1291 return []
1282 return []
1292
1283
1293 req = dict.fromkeys(unknown)
1284 req = dict.fromkeys(unknown)
1294 reqcnt = 0
1285 reqcnt = 0
1295
1286
1296 # search through remote branches
1287 # search through remote branches
1297 # a 'branch' here is a linear segment of history, with four parts:
1288 # a 'branch' here is a linear segment of history, with four parts:
1298 # head, root, first parent, second parent
1289 # head, root, first parent, second parent
1299 # (a branch always has two parents (or none) by definition)
1290 # (a branch always has two parents (or none) by definition)
1300 unknown = remote.branches(unknown)
1291 unknown = remote.branches(unknown)
1301 while unknown:
1292 while unknown:
1302 r = []
1293 r = []
1303 while unknown:
1294 while unknown:
1304 n = unknown.pop(0)
1295 n = unknown.pop(0)
1305 if n[0] in seen:
1296 if n[0] in seen:
1306 continue
1297 continue
1307
1298
1308 self.ui.debug(_("examining %s:%s\n")
1299 self.ui.debug(_("examining %s:%s\n")
1309 % (short(n[0]), short(n[1])))
1300 % (short(n[0]), short(n[1])))
1310 if n[0] == nullid: # found the end of the branch
1301 if n[0] == nullid: # found the end of the branch
1311 pass
1302 pass
1312 elif n in seenbranch:
1303 elif n in seenbranch:
1313 self.ui.debug(_("branch already found\n"))
1304 self.ui.debug(_("branch already found\n"))
1314 continue
1305 continue
1315 elif n[1] and n[1] in m: # do we know the base?
1306 elif n[1] and n[1] in m: # do we know the base?
1316 self.ui.debug(_("found incomplete branch %s:%s\n")
1307 self.ui.debug(_("found incomplete branch %s:%s\n")
1317 % (short(n[0]), short(n[1])))
1308 % (short(n[0]), short(n[1])))
1318 search.append(n) # schedule branch range for scanning
1309 search.append(n) # schedule branch range for scanning
1319 seenbranch[n] = 1
1310 seenbranch[n] = 1
1320 else:
1311 else:
1321 if n[1] not in seen and n[1] not in fetch:
1312 if n[1] not in seen and n[1] not in fetch:
1322 if n[2] in m and n[3] in m:
1313 if n[2] in m and n[3] in m:
1323 self.ui.debug(_("found new changeset %s\n") %
1314 self.ui.debug(_("found new changeset %s\n") %
1324 short(n[1]))
1315 short(n[1]))
1325 fetch[n[1]] = 1 # earliest unknown
1316 fetch[n[1]] = 1 # earliest unknown
1326 for p in n[2:4]:
1317 for p in n[2:4]:
1327 if p in m:
1318 if p in m:
1328 base[p] = 1 # latest known
1319 base[p] = 1 # latest known
1329
1320
1330 for p in n[2:4]:
1321 for p in n[2:4]:
1331 if p not in req and p not in m:
1322 if p not in req and p not in m:
1332 r.append(p)
1323 r.append(p)
1333 req[p] = 1
1324 req[p] = 1
1334 seen[n[0]] = 1
1325 seen[n[0]] = 1
1335
1326
1336 if r:
1327 if r:
1337 reqcnt += 1
1328 reqcnt += 1
1338 self.ui.debug(_("request %d: %s\n") %
1329 self.ui.debug(_("request %d: %s\n") %
1339 (reqcnt, " ".join(map(short, r))))
1330 (reqcnt, " ".join(map(short, r))))
1340 for p in xrange(0, len(r), 10):
1331 for p in xrange(0, len(r), 10):
1341 for b in remote.branches(r[p:p+10]):
1332 for b in remote.branches(r[p:p+10]):
1342 self.ui.debug(_("received %s:%s\n") %
1333 self.ui.debug(_("received %s:%s\n") %
1343 (short(b[0]), short(b[1])))
1334 (short(b[0]), short(b[1])))
1344 unknown.append(b)
1335 unknown.append(b)
1345
1336
1346 # do binary search on the branches we found
1337 # do binary search on the branches we found
1347 while search:
1338 while search:
1348 n = search.pop(0)
1339 n = search.pop(0)
1349 reqcnt += 1
1340 reqcnt += 1
1350 l = remote.between([(n[0], n[1])])[0]
1341 l = remote.between([(n[0], n[1])])[0]
1351 l.append(n[1])
1342 l.append(n[1])
1352 p = n[0]
1343 p = n[0]
1353 f = 1
1344 f = 1
1354 for i in l:
1345 for i in l:
1355 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1346 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1356 if i in m:
1347 if i in m:
1357 if f <= 2:
1348 if f <= 2:
1358 self.ui.debug(_("found new branch changeset %s\n") %
1349 self.ui.debug(_("found new branch changeset %s\n") %
1359 short(p))
1350 short(p))
1360 fetch[p] = 1
1351 fetch[p] = 1
1361 base[i] = 1
1352 base[i] = 1
1362 else:
1353 else:
1363 self.ui.debug(_("narrowed branch search to %s:%s\n")
1354 self.ui.debug(_("narrowed branch search to %s:%s\n")
1364 % (short(p), short(i)))
1355 % (short(p), short(i)))
1365 search.append((p, i))
1356 search.append((p, i))
1366 break
1357 break
1367 p, f = i, f * 2
1358 p, f = i, f * 2
1368
1359
1369 # sanity check our fetch list
1360 # sanity check our fetch list
1370 for f in fetch.keys():
1361 for f in fetch.keys():
1371 if f in m:
1362 if f in m:
1372 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1363 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1373
1364
1374 if base.keys() == [nullid]:
1365 if base.keys() == [nullid]:
1375 if force:
1366 if force:
1376 self.ui.warn(_("warning: repository is unrelated\n"))
1367 self.ui.warn(_("warning: repository is unrelated\n"))
1377 else:
1368 else:
1378 raise util.Abort(_("repository is unrelated"))
1369 raise util.Abort(_("repository is unrelated"))
1379
1370
1380 self.ui.debug(_("found new changesets starting at ") +
1371 self.ui.debug(_("found new changesets starting at ") +
1381 " ".join([short(f) for f in fetch]) + "\n")
1372 " ".join([short(f) for f in fetch]) + "\n")
1382
1373
1383 self.ui.debug(_("%d total queries\n") % reqcnt)
1374 self.ui.debug(_("%d total queries\n") % reqcnt)
1384
1375
1385 return fetch.keys()
1376 return fetch.keys()
1386
1377
1387 def findoutgoing(self, remote, base=None, heads=None, force=False):
1378 def findoutgoing(self, remote, base=None, heads=None, force=False):
1388 """Return list of nodes that are roots of subsets not in remote
1379 """Return list of nodes that are roots of subsets not in remote
1389
1380
1390 If base dict is specified, assume that these nodes and their parents
1381 If base dict is specified, assume that these nodes and their parents
1391 exist on the remote side.
1382 exist on the remote side.
1392 If a list of heads is specified, return only nodes which are heads
1383 If a list of heads is specified, return only nodes which are heads
1393 or ancestors of these heads, and return a second element which
1384 or ancestors of these heads, and return a second element which
1394 contains all remote heads which get new children.
1385 contains all remote heads which get new children.
1395 """
1386 """
1396 if base == None:
1387 if base == None:
1397 base = {}
1388 base = {}
1398 self.findincoming(remote, base, heads, force=force)
1389 self.findincoming(remote, base, heads, force=force)
1399
1390
1400 self.ui.debug(_("common changesets up to ")
1391 self.ui.debug(_("common changesets up to ")
1401 + " ".join(map(short, base.keys())) + "\n")
1392 + " ".join(map(short, base.keys())) + "\n")
1402
1393
1403 remain = dict.fromkeys(self.changelog.nodemap)
1394 remain = dict.fromkeys(self.changelog.nodemap)
1404
1395
1405 # prune everything remote has from the tree
1396 # prune everything remote has from the tree
1406 del remain[nullid]
1397 del remain[nullid]
1407 remove = base.keys()
1398 remove = base.keys()
1408 while remove:
1399 while remove:
1409 n = remove.pop(0)
1400 n = remove.pop(0)
1410 if n in remain:
1401 if n in remain:
1411 del remain[n]
1402 del remain[n]
1412 for p in self.changelog.parents(n):
1403 for p in self.changelog.parents(n):
1413 remove.append(p)
1404 remove.append(p)
1414
1405
1415 # find every node whose parents have been pruned
1406 # find every node whose parents have been pruned
1416 subset = []
1407 subset = []
1417 # find every remote head that will get new children
1408 # find every remote head that will get new children
1418 updated_heads = {}
1409 updated_heads = {}
1419 for n in remain:
1410 for n in remain:
1420 p1, p2 = self.changelog.parents(n)
1411 p1, p2 = self.changelog.parents(n)
1421 if p1 not in remain and p2 not in remain:
1412 if p1 not in remain and p2 not in remain:
1422 subset.append(n)
1413 subset.append(n)
1423 if heads:
1414 if heads:
1424 if p1 in heads:
1415 if p1 in heads:
1425 updated_heads[p1] = True
1416 updated_heads[p1] = True
1426 if p2 in heads:
1417 if p2 in heads:
1427 updated_heads[p2] = True
1418 updated_heads[p2] = True
1428
1419
1429 # this is the set of all roots we have to push
1420 # this is the set of all roots we have to push
1430 if heads:
1421 if heads:
1431 return subset, updated_heads.keys()
1422 return subset, updated_heads.keys()
1432 else:
1423 else:
1433 return subset
1424 return subset
1434
1425
1435 def pull(self, remote, heads=None, force=False):
1426 def pull(self, remote, heads=None, force=False):
1436 lock = self.lock()
1427 lock = self.lock()
1437 try:
1428 try:
1438 fetch = self.findincoming(remote, heads=heads, force=force)
1429 fetch = self.findincoming(remote, heads=heads, force=force)
1439 if fetch == [nullid]:
1430 if fetch == [nullid]:
1440 self.ui.status(_("requesting all changes\n"))
1431 self.ui.status(_("requesting all changes\n"))
1441
1432
1442 if not fetch:
1433 if not fetch:
1443 self.ui.status(_("no changes found\n"))
1434 self.ui.status(_("no changes found\n"))
1444 return 0
1435 return 0
1445
1436
1446 if heads is None:
1437 if heads is None:
1447 cg = remote.changegroup(fetch, 'pull')
1438 cg = remote.changegroup(fetch, 'pull')
1448 else:
1439 else:
1449 if 'changegroupsubset' not in remote.capabilities:
1440 if 'changegroupsubset' not in remote.capabilities:
1450 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1441 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1451 cg = remote.changegroupsubset(fetch, heads, 'pull')
1442 cg = remote.changegroupsubset(fetch, heads, 'pull')
1452 return self.addchangegroup(cg, 'pull', remote.url())
1443 return self.addchangegroup(cg, 'pull', remote.url())
1453 finally:
1444 finally:
1454 del lock
1445 del lock
1455
1446
1456 def push(self, remote, force=False, revs=None):
1447 def push(self, remote, force=False, revs=None):
1457 # there are two ways to push to remote repo:
1448 # there are two ways to push to remote repo:
1458 #
1449 #
1459 # addchangegroup assumes local user can lock remote
1450 # addchangegroup assumes local user can lock remote
1460 # repo (local filesystem, old ssh servers).
1451 # repo (local filesystem, old ssh servers).
1461 #
1452 #
1462 # unbundle assumes local user cannot lock remote repo (new ssh
1453 # unbundle assumes local user cannot lock remote repo (new ssh
1463 # servers, http servers).
1454 # servers, http servers).
1464
1455
1465 if remote.capable('unbundle'):
1456 if remote.capable('unbundle'):
1466 return self.push_unbundle(remote, force, revs)
1457 return self.push_unbundle(remote, force, revs)
1467 return self.push_addchangegroup(remote, force, revs)
1458 return self.push_addchangegroup(remote, force, revs)
1468
1459
1469 def prepush(self, remote, force, revs):
1460 def prepush(self, remote, force, revs):
1470 base = {}
1461 base = {}
1471 remote_heads = remote.heads()
1462 remote_heads = remote.heads()
1472 inc = self.findincoming(remote, base, remote_heads, force=force)
1463 inc = self.findincoming(remote, base, remote_heads, force=force)
1473
1464
1474 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1465 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1475 if revs is not None:
1466 if revs is not None:
1476 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1467 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1477 else:
1468 else:
1478 bases, heads = update, self.changelog.heads()
1469 bases, heads = update, self.changelog.heads()
1479
1470
1480 if not bases:
1471 if not bases:
1481 self.ui.status(_("no changes found\n"))
1472 self.ui.status(_("no changes found\n"))
1482 return None, 1
1473 return None, 1
1483 elif not force:
1474 elif not force:
1484 # check if we're creating new remote heads
1475 # check if we're creating new remote heads
1485 # to be a remote head after push, node must be either
1476 # to be a remote head after push, node must be either
1486 # - unknown locally
1477 # - unknown locally
1487 # - a local outgoing head descended from update
1478 # - a local outgoing head descended from update
1488 # - a remote head that's known locally and not
1479 # - a remote head that's known locally and not
1489 # ancestral to an outgoing head
1480 # ancestral to an outgoing head
1490
1481
1491 warn = 0
1482 warn = 0
1492
1483
1493 if remote_heads == [nullid]:
1484 if remote_heads == [nullid]:
1494 warn = 0
1485 warn = 0
1495 elif not revs and len(heads) > len(remote_heads):
1486 elif not revs and len(heads) > len(remote_heads):
1496 warn = 1
1487 warn = 1
1497 else:
1488 else:
1498 newheads = list(heads)
1489 newheads = list(heads)
1499 for r in remote_heads:
1490 for r in remote_heads:
1500 if r in self.changelog.nodemap:
1491 if r in self.changelog.nodemap:
1501 desc = self.changelog.heads(r, heads)
1492 desc = self.changelog.heads(r, heads)
1502 l = [h for h in heads if h in desc]
1493 l = [h for h in heads if h in desc]
1503 if not l:
1494 if not l:
1504 newheads.append(r)
1495 newheads.append(r)
1505 else:
1496 else:
1506 newheads.append(r)
1497 newheads.append(r)
1507 if len(newheads) > len(remote_heads):
1498 if len(newheads) > len(remote_heads):
1508 warn = 1
1499 warn = 1
1509
1500
1510 if warn:
1501 if warn:
1511 self.ui.warn(_("abort: push creates new remote heads!\n"))
1502 self.ui.warn(_("abort: push creates new remote heads!\n"))
1512 self.ui.status(_("(did you forget to merge?"
1503 self.ui.status(_("(did you forget to merge?"
1513 " use push -f to force)\n"))
1504 " use push -f to force)\n"))
1514 return None, 0
1505 return None, 0
1515 elif inc:
1506 elif inc:
1516 self.ui.warn(_("note: unsynced remote changes!\n"))
1507 self.ui.warn(_("note: unsynced remote changes!\n"))
1517
1508
1518
1509
1519 if revs is None:
1510 if revs is None:
1520 cg = self.changegroup(update, 'push')
1511 cg = self.changegroup(update, 'push')
1521 else:
1512 else:
1522 cg = self.changegroupsubset(update, revs, 'push')
1513 cg = self.changegroupsubset(update, revs, 'push')
1523 return cg, remote_heads
1514 return cg, remote_heads
1524
1515
1525 def push_addchangegroup(self, remote, force, revs):
1516 def push_addchangegroup(self, remote, force, revs):
1526 lock = remote.lock()
1517 lock = remote.lock()
1527 try:
1518 try:
1528 ret = self.prepush(remote, force, revs)
1519 ret = self.prepush(remote, force, revs)
1529 if ret[0] is not None:
1520 if ret[0] is not None:
1530 cg, remote_heads = ret
1521 cg, remote_heads = ret
1531 return remote.addchangegroup(cg, 'push', self.url())
1522 return remote.addchangegroup(cg, 'push', self.url())
1532 return ret[1]
1523 return ret[1]
1533 finally:
1524 finally:
1534 del lock
1525 del lock
1535
1526
1536 def push_unbundle(self, remote, force, revs):
1527 def push_unbundle(self, remote, force, revs):
1537 # local repo finds heads on server, finds out what revs it
1528 # local repo finds heads on server, finds out what revs it
1538 # must push. once revs transferred, if server finds it has
1529 # must push. once revs transferred, if server finds it has
1539 # different heads (someone else won commit/push race), server
1530 # different heads (someone else won commit/push race), server
1540 # aborts.
1531 # aborts.
1541
1532
1542 ret = self.prepush(remote, force, revs)
1533 ret = self.prepush(remote, force, revs)
1543 if ret[0] is not None:
1534 if ret[0] is not None:
1544 cg, remote_heads = ret
1535 cg, remote_heads = ret
1545 if force: remote_heads = ['force']
1536 if force: remote_heads = ['force']
1546 return remote.unbundle(cg, remote_heads, 'push')
1537 return remote.unbundle(cg, remote_heads, 'push')
1547 return ret[1]
1538 return ret[1]
1548
1539
1549 def changegroupinfo(self, nodes, source):
1540 def changegroupinfo(self, nodes, source):
1550 if self.ui.verbose or source == 'bundle':
1541 if self.ui.verbose or source == 'bundle':
1551 self.ui.status(_("%d changesets found\n") % len(nodes))
1542 self.ui.status(_("%d changesets found\n") % len(nodes))
1552 if self.ui.debugflag:
1543 if self.ui.debugflag:
1553 self.ui.debug(_("List of changesets:\n"))
1544 self.ui.debug(_("List of changesets:\n"))
1554 for node in nodes:
1545 for node in nodes:
1555 self.ui.debug("%s\n" % hex(node))
1546 self.ui.debug("%s\n" % hex(node))
1556
1547
1557 def changegroupsubset(self, bases, heads, source, extranodes=None):
1548 def changegroupsubset(self, bases, heads, source, extranodes=None):
1558 """This function generates a changegroup consisting of all the nodes
1549 """This function generates a changegroup consisting of all the nodes
1559 that are descendents of any of the bases, and ancestors of any of
1550 that are descendents of any of the bases, and ancestors of any of
1560 the heads.
1551 the heads.
1561
1552
1562 It is fairly complex as determining which filenodes and which
1553 It is fairly complex as determining which filenodes and which
1563 manifest nodes need to be included for the changeset to be complete
1554 manifest nodes need to be included for the changeset to be complete
1564 is non-trivial.
1555 is non-trivial.
1565
1556
1566 Another wrinkle is doing the reverse, figuring out which changeset in
1557 Another wrinkle is doing the reverse, figuring out which changeset in
1567 the changegroup a particular filenode or manifestnode belongs to.
1558 the changegroup a particular filenode or manifestnode belongs to.
1568
1559
1569 The caller can specify some nodes that must be included in the
1560 The caller can specify some nodes that must be included in the
1570 changegroup using the extranodes argument. It should be a dict
1561 changegroup using the extranodes argument. It should be a dict
1571 where the keys are the filenames (or 1 for the manifest), and the
1562 where the keys are the filenames (or 1 for the manifest), and the
1572 values are lists of (node, linknode) tuples, where node is a wanted
1563 values are lists of (node, linknode) tuples, where node is a wanted
1573 node and linknode is the changelog node that should be transmitted as
1564 node and linknode is the changelog node that should be transmitted as
1574 the linkrev.
1565 the linkrev.
1575 """
1566 """
1576
1567
1577 self.hook('preoutgoing', throw=True, source=source)
1568 self.hook('preoutgoing', throw=True, source=source)
1578
1569
1579 # Set up some initial variables
1570 # Set up some initial variables
1580 # Make it easy to refer to self.changelog
1571 # Make it easy to refer to self.changelog
1581 cl = self.changelog
1572 cl = self.changelog
1582 # msng is short for missing - compute the list of changesets in this
1573 # msng is short for missing - compute the list of changesets in this
1583 # changegroup.
1574 # changegroup.
1584 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1575 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1585 self.changegroupinfo(msng_cl_lst, source)
1576 self.changegroupinfo(msng_cl_lst, source)
1586 # Some bases may turn out to be superfluous, and some heads may be
1577 # Some bases may turn out to be superfluous, and some heads may be
1587 # too. nodesbetween will return the minimal set of bases and heads
1578 # too. nodesbetween will return the minimal set of bases and heads
1588 # necessary to re-create the changegroup.
1579 # necessary to re-create the changegroup.
1589
1580
1590 # Known heads are the list of heads that it is assumed the recipient
1581 # Known heads are the list of heads that it is assumed the recipient
1591 # of this changegroup will know about.
1582 # of this changegroup will know about.
1592 knownheads = {}
1583 knownheads = {}
1593 # We assume that all parents of bases are known heads.
1584 # We assume that all parents of bases are known heads.
1594 for n in bases:
1585 for n in bases:
1595 for p in cl.parents(n):
1586 for p in cl.parents(n):
1596 if p != nullid:
1587 if p != nullid:
1597 knownheads[p] = 1
1588 knownheads[p] = 1
1598 knownheads = knownheads.keys()
1589 knownheads = knownheads.keys()
1599 if knownheads:
1590 if knownheads:
1600 # Now that we know what heads are known, we can compute which
1591 # Now that we know what heads are known, we can compute which
1601 # changesets are known. The recipient must know about all
1592 # changesets are known. The recipient must know about all
1602 # changesets required to reach the known heads from the null
1593 # changesets required to reach the known heads from the null
1603 # changeset.
1594 # changeset.
1604 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1595 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1605 junk = None
1596 junk = None
1606 # Transform the list into an ersatz set.
1597 # Transform the list into an ersatz set.
1607 has_cl_set = dict.fromkeys(has_cl_set)
1598 has_cl_set = dict.fromkeys(has_cl_set)
1608 else:
1599 else:
1609 # If there were no known heads, the recipient cannot be assumed to
1600 # If there were no known heads, the recipient cannot be assumed to
1610 # know about any changesets.
1601 # know about any changesets.
1611 has_cl_set = {}
1602 has_cl_set = {}
1612
1603
1613 # Make it easy to refer to self.manifest
1604 # Make it easy to refer to self.manifest
1614 mnfst = self.manifest
1605 mnfst = self.manifest
1615 # We don't know which manifests are missing yet
1606 # We don't know which manifests are missing yet
1616 msng_mnfst_set = {}
1607 msng_mnfst_set = {}
1617 # Nor do we know which filenodes are missing.
1608 # Nor do we know which filenodes are missing.
1618 msng_filenode_set = {}
1609 msng_filenode_set = {}
1619
1610
1620 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1611 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1621 junk = None
1612 junk = None
1622
1613
1623 # A changeset always belongs to itself, so the changenode lookup
1614 # A changeset always belongs to itself, so the changenode lookup
1624 # function for a changenode is identity.
1615 # function for a changenode is identity.
1625 def identity(x):
1616 def identity(x):
1626 return x
1617 return x
1627
1618
1628 # A function generating function. Sets up an environment for the
1619 # A function generating function. Sets up an environment for the
1629 # inner function.
1620 # inner function.
1630 def cmp_by_rev_func(revlog):
1621 def cmp_by_rev_func(revlog):
1631 # Compare two nodes by their revision number in the environment's
1622 # Compare two nodes by their revision number in the environment's
1632 # revision history. Since the revision number both represents the
1623 # revision history. Since the revision number both represents the
1633 # most efficient order to read the nodes in, and represents a
1624 # most efficient order to read the nodes in, and represents a
1634 # topological sorting of the nodes, this function is often useful.
1625 # topological sorting of the nodes, this function is often useful.
1635 def cmp_by_rev(a, b):
1626 def cmp_by_rev(a, b):
1636 return cmp(revlog.rev(a), revlog.rev(b))
1627 return cmp(revlog.rev(a), revlog.rev(b))
1637 return cmp_by_rev
1628 return cmp_by_rev
1638
1629
1639 # If we determine that a particular file or manifest node must be a
1630 # If we determine that a particular file or manifest node must be a
1640 # node that the recipient of the changegroup will already have, we can
1631 # node that the recipient of the changegroup will already have, we can
1641 # also assume the recipient will have all the parents. This function
1632 # also assume the recipient will have all the parents. This function
1642 # prunes them from the set of missing nodes.
1633 # prunes them from the set of missing nodes.
1643 def prune_parents(revlog, hasset, msngset):
1634 def prune_parents(revlog, hasset, msngset):
1644 haslst = hasset.keys()
1635 haslst = hasset.keys()
1645 haslst.sort(cmp_by_rev_func(revlog))
1636 haslst.sort(cmp_by_rev_func(revlog))
1646 for node in haslst:
1637 for node in haslst:
1647 parentlst = [p for p in revlog.parents(node) if p != nullid]
1638 parentlst = [p for p in revlog.parents(node) if p != nullid]
1648 while parentlst:
1639 while parentlst:
1649 n = parentlst.pop()
1640 n = parentlst.pop()
1650 if n not in hasset:
1641 if n not in hasset:
1651 hasset[n] = 1
1642 hasset[n] = 1
1652 p = [p for p in revlog.parents(n) if p != nullid]
1643 p = [p for p in revlog.parents(n) if p != nullid]
1653 parentlst.extend(p)
1644 parentlst.extend(p)
1654 for n in hasset:
1645 for n in hasset:
1655 msngset.pop(n, None)
1646 msngset.pop(n, None)
1656
1647
1657 # This is a function generating function used to set up an environment
1648 # This is a function generating function used to set up an environment
1658 # for the inner function to execute in.
1649 # for the inner function to execute in.
1659 def manifest_and_file_collector(changedfileset):
1650 def manifest_and_file_collector(changedfileset):
1660 # This is an information gathering function that gathers
1651 # This is an information gathering function that gathers
1661 # information from each changeset node that goes out as part of
1652 # information from each changeset node that goes out as part of
1662 # the changegroup. The information gathered is a list of which
1653 # the changegroup. The information gathered is a list of which
1663 # manifest nodes are potentially required (the recipient may
1654 # manifest nodes are potentially required (the recipient may
1664 # already have them) and total list of all files which were
1655 # already have them) and total list of all files which were
1665 # changed in any changeset in the changegroup.
1656 # changed in any changeset in the changegroup.
1666 #
1657 #
1667 # We also remember the first changenode we saw any manifest
1658 # We also remember the first changenode we saw any manifest
1668 # referenced by so we can later determine which changenode 'owns'
1659 # referenced by so we can later determine which changenode 'owns'
1669 # the manifest.
1660 # the manifest.
1670 def collect_manifests_and_files(clnode):
1661 def collect_manifests_and_files(clnode):
1671 c = cl.read(clnode)
1662 c = cl.read(clnode)
1672 for f in c[3]:
1663 for f in c[3]:
1673 # This is to make sure we only have one instance of each
1664 # This is to make sure we only have one instance of each
1674 # filename string for each filename.
1665 # filename string for each filename.
1675 changedfileset.setdefault(f, f)
1666 changedfileset.setdefault(f, f)
1676 msng_mnfst_set.setdefault(c[0], clnode)
1667 msng_mnfst_set.setdefault(c[0], clnode)
1677 return collect_manifests_and_files
1668 return collect_manifests_and_files
1678
1669
1679 # Figure out which manifest nodes (of the ones we think might be part
1670 # Figure out which manifest nodes (of the ones we think might be part
1680 # of the changegroup) the recipient must know about and remove them
1671 # of the changegroup) the recipient must know about and remove them
1681 # from the changegroup.
1672 # from the changegroup.
1682 def prune_manifests():
1673 def prune_manifests():
1683 has_mnfst_set = {}
1674 has_mnfst_set = {}
1684 for n in msng_mnfst_set:
1675 for n in msng_mnfst_set:
1685 # If a 'missing' manifest thinks it belongs to a changenode
1676 # If a 'missing' manifest thinks it belongs to a changenode
1686 # the recipient is assumed to have, obviously the recipient
1677 # the recipient is assumed to have, obviously the recipient
1687 # must have that manifest.
1678 # must have that manifest.
1688 linknode = cl.node(mnfst.linkrev(n))
1679 linknode = cl.node(mnfst.linkrev(n))
1689 if linknode in has_cl_set:
1680 if linknode in has_cl_set:
1690 has_mnfst_set[n] = 1
1681 has_mnfst_set[n] = 1
1691 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1682 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1692
1683
1693 # Use the information collected in collect_manifests_and_files to say
1684 # Use the information collected in collect_manifests_and_files to say
1694 # which changenode any manifestnode belongs to.
1685 # which changenode any manifestnode belongs to.
1695 def lookup_manifest_link(mnfstnode):
1686 def lookup_manifest_link(mnfstnode):
1696 return msng_mnfst_set[mnfstnode]
1687 return msng_mnfst_set[mnfstnode]
1697
1688
1698 # A function generating function that sets up the initial environment
1689 # A function generating function that sets up the initial environment
1699 # the inner function.
1690 # the inner function.
1700 def filenode_collector(changedfiles):
1691 def filenode_collector(changedfiles):
1701 next_rev = [0]
1692 next_rev = [0]
1702 # This gathers information from each manifestnode included in the
1693 # This gathers information from each manifestnode included in the
1703 # changegroup about which filenodes the manifest node references
1694 # changegroup about which filenodes the manifest node references
1704 # so we can include those in the changegroup too.
1695 # so we can include those in the changegroup too.
1705 #
1696 #
1706 # It also remembers which changenode each filenode belongs to. It
1697 # It also remembers which changenode each filenode belongs to. It
1707 # does this by assuming the a filenode belongs to the changenode
1698 # does this by assuming the a filenode belongs to the changenode
1708 # the first manifest that references it belongs to.
1699 # the first manifest that references it belongs to.
1709 def collect_msng_filenodes(mnfstnode):
1700 def collect_msng_filenodes(mnfstnode):
1710 r = mnfst.rev(mnfstnode)
1701 r = mnfst.rev(mnfstnode)
1711 if r == next_rev[0]:
1702 if r == next_rev[0]:
1712 # If the last rev we looked at was the one just previous,
1703 # If the last rev we looked at was the one just previous,
1713 # we only need to see a diff.
1704 # we only need to see a diff.
1714 deltamf = mnfst.readdelta(mnfstnode)
1705 deltamf = mnfst.readdelta(mnfstnode)
1715 # For each line in the delta
1706 # For each line in the delta
1716 for f, fnode in deltamf.items():
1707 for f, fnode in deltamf.items():
1717 f = changedfiles.get(f, None)
1708 f = changedfiles.get(f, None)
1718 # And if the file is in the list of files we care
1709 # And if the file is in the list of files we care
1719 # about.
1710 # about.
1720 if f is not None:
1711 if f is not None:
1721 # Get the changenode this manifest belongs to
1712 # Get the changenode this manifest belongs to
1722 clnode = msng_mnfst_set[mnfstnode]
1713 clnode = msng_mnfst_set[mnfstnode]
1723 # Create the set of filenodes for the file if
1714 # Create the set of filenodes for the file if
1724 # there isn't one already.
1715 # there isn't one already.
1725 ndset = msng_filenode_set.setdefault(f, {})
1716 ndset = msng_filenode_set.setdefault(f, {})
1726 # And set the filenode's changelog node to the
1717 # And set the filenode's changelog node to the
1727 # manifest's if it hasn't been set already.
1718 # manifest's if it hasn't been set already.
1728 ndset.setdefault(fnode, clnode)
1719 ndset.setdefault(fnode, clnode)
1729 else:
1720 else:
1730 # Otherwise we need a full manifest.
1721 # Otherwise we need a full manifest.
1731 m = mnfst.read(mnfstnode)
1722 m = mnfst.read(mnfstnode)
1732 # For every file in we care about.
1723 # For every file in we care about.
1733 for f in changedfiles:
1724 for f in changedfiles:
1734 fnode = m.get(f, None)
1725 fnode = m.get(f, None)
1735 # If it's in the manifest
1726 # If it's in the manifest
1736 if fnode is not None:
1727 if fnode is not None:
1737 # See comments above.
1728 # See comments above.
1738 clnode = msng_mnfst_set[mnfstnode]
1729 clnode = msng_mnfst_set[mnfstnode]
1739 ndset = msng_filenode_set.setdefault(f, {})
1730 ndset = msng_filenode_set.setdefault(f, {})
1740 ndset.setdefault(fnode, clnode)
1731 ndset.setdefault(fnode, clnode)
1741 # Remember the revision we hope to see next.
1732 # Remember the revision we hope to see next.
1742 next_rev[0] = r + 1
1733 next_rev[0] = r + 1
1743 return collect_msng_filenodes
1734 return collect_msng_filenodes
1744
1735
1745 # We have a list of filenodes we think we need for a file, lets remove
1736 # We have a list of filenodes we think we need for a file, lets remove
1746 # all those we now the recipient must have.
1737 # all those we now the recipient must have.
1747 def prune_filenodes(f, filerevlog):
1738 def prune_filenodes(f, filerevlog):
1748 msngset = msng_filenode_set[f]
1739 msngset = msng_filenode_set[f]
1749 hasset = {}
1740 hasset = {}
1750 # If a 'missing' filenode thinks it belongs to a changenode we
1741 # If a 'missing' filenode thinks it belongs to a changenode we
1751 # assume the recipient must have, then the recipient must have
1742 # assume the recipient must have, then the recipient must have
1752 # that filenode.
1743 # that filenode.
1753 for n in msngset:
1744 for n in msngset:
1754 clnode = cl.node(filerevlog.linkrev(n))
1745 clnode = cl.node(filerevlog.linkrev(n))
1755 if clnode in has_cl_set:
1746 if clnode in has_cl_set:
1756 hasset[n] = 1
1747 hasset[n] = 1
1757 prune_parents(filerevlog, hasset, msngset)
1748 prune_parents(filerevlog, hasset, msngset)
1758
1749
1759 # A function generator function that sets up the a context for the
1750 # A function generator function that sets up the a context for the
1760 # inner function.
1751 # inner function.
1761 def lookup_filenode_link_func(fname):
1752 def lookup_filenode_link_func(fname):
1762 msngset = msng_filenode_set[fname]
1753 msngset = msng_filenode_set[fname]
1763 # Lookup the changenode the filenode belongs to.
1754 # Lookup the changenode the filenode belongs to.
1764 def lookup_filenode_link(fnode):
1755 def lookup_filenode_link(fnode):
1765 return msngset[fnode]
1756 return msngset[fnode]
1766 return lookup_filenode_link
1757 return lookup_filenode_link
1767
1758
1768 # Add the nodes that were explicitly requested.
1759 # Add the nodes that were explicitly requested.
1769 def add_extra_nodes(name, nodes):
1760 def add_extra_nodes(name, nodes):
1770 if not extranodes or name not in extranodes:
1761 if not extranodes or name not in extranodes:
1771 return
1762 return
1772
1763
1773 for node, linknode in extranodes[name]:
1764 for node, linknode in extranodes[name]:
1774 if node not in nodes:
1765 if node not in nodes:
1775 nodes[node] = linknode
1766 nodes[node] = linknode
1776
1767
1777 # Now that we have all theses utility functions to help out and
1768 # Now that we have all theses utility functions to help out and
1778 # logically divide up the task, generate the group.
1769 # logically divide up the task, generate the group.
1779 def gengroup():
1770 def gengroup():
1780 # The set of changed files starts empty.
1771 # The set of changed files starts empty.
1781 changedfiles = {}
1772 changedfiles = {}
1782 # Create a changenode group generator that will call our functions
1773 # Create a changenode group generator that will call our functions
1783 # back to lookup the owning changenode and collect information.
1774 # back to lookup the owning changenode and collect information.
1784 group = cl.group(msng_cl_lst, identity,
1775 group = cl.group(msng_cl_lst, identity,
1785 manifest_and_file_collector(changedfiles))
1776 manifest_and_file_collector(changedfiles))
1786 for chnk in group:
1777 for chnk in group:
1787 yield chnk
1778 yield chnk
1788
1779
1789 # The list of manifests has been collected by the generator
1780 # The list of manifests has been collected by the generator
1790 # calling our functions back.
1781 # calling our functions back.
1791 prune_manifests()
1782 prune_manifests()
1792 add_extra_nodes(1, msng_mnfst_set)
1783 add_extra_nodes(1, msng_mnfst_set)
1793 msng_mnfst_lst = msng_mnfst_set.keys()
1784 msng_mnfst_lst = msng_mnfst_set.keys()
1794 # Sort the manifestnodes by revision number.
1785 # Sort the manifestnodes by revision number.
1795 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1786 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1796 # Create a generator for the manifestnodes that calls our lookup
1787 # Create a generator for the manifestnodes that calls our lookup
1797 # and data collection functions back.
1788 # and data collection functions back.
1798 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1789 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1799 filenode_collector(changedfiles))
1790 filenode_collector(changedfiles))
1800 for chnk in group:
1791 for chnk in group:
1801 yield chnk
1792 yield chnk
1802
1793
1803 # These are no longer needed, dereference and toss the memory for
1794 # These are no longer needed, dereference and toss the memory for
1804 # them.
1795 # them.
1805 msng_mnfst_lst = None
1796 msng_mnfst_lst = None
1806 msng_mnfst_set.clear()
1797 msng_mnfst_set.clear()
1807
1798
1808 if extranodes:
1799 if extranodes:
1809 for fname in extranodes:
1800 for fname in extranodes:
1810 if isinstance(fname, int):
1801 if isinstance(fname, int):
1811 continue
1802 continue
1812 add_extra_nodes(fname,
1803 add_extra_nodes(fname,
1813 msng_filenode_set.setdefault(fname, {}))
1804 msng_filenode_set.setdefault(fname, {}))
1814 changedfiles[fname] = 1
1805 changedfiles[fname] = 1
1815 # Go through all our files in order sorted by name.
1806 # Go through all our files in order sorted by name.
1816 for fname in util.sort(changedfiles):
1807 for fname in util.sort(changedfiles):
1817 filerevlog = self.file(fname)
1808 filerevlog = self.file(fname)
1818 if not len(filerevlog):
1809 if not len(filerevlog):
1819 raise util.Abort(_("empty or missing revlog for %s") % fname)
1810 raise util.Abort(_("empty or missing revlog for %s") % fname)
1820 # Toss out the filenodes that the recipient isn't really
1811 # Toss out the filenodes that the recipient isn't really
1821 # missing.
1812 # missing.
1822 if fname in msng_filenode_set:
1813 if fname in msng_filenode_set:
1823 prune_filenodes(fname, filerevlog)
1814 prune_filenodes(fname, filerevlog)
1824 msng_filenode_lst = msng_filenode_set[fname].keys()
1815 msng_filenode_lst = msng_filenode_set[fname].keys()
1825 else:
1816 else:
1826 msng_filenode_lst = []
1817 msng_filenode_lst = []
1827 # If any filenodes are left, generate the group for them,
1818 # If any filenodes are left, generate the group for them,
1828 # otherwise don't bother.
1819 # otherwise don't bother.
1829 if len(msng_filenode_lst) > 0:
1820 if len(msng_filenode_lst) > 0:
1830 yield changegroup.chunkheader(len(fname))
1821 yield changegroup.chunkheader(len(fname))
1831 yield fname
1822 yield fname
1832 # Sort the filenodes by their revision #
1823 # Sort the filenodes by their revision #
1833 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1824 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1834 # Create a group generator and only pass in a changenode
1825 # Create a group generator and only pass in a changenode
1835 # lookup function as we need to collect no information
1826 # lookup function as we need to collect no information
1836 # from filenodes.
1827 # from filenodes.
1837 group = filerevlog.group(msng_filenode_lst,
1828 group = filerevlog.group(msng_filenode_lst,
1838 lookup_filenode_link_func(fname))
1829 lookup_filenode_link_func(fname))
1839 for chnk in group:
1830 for chnk in group:
1840 yield chnk
1831 yield chnk
1841 if fname in msng_filenode_set:
1832 if fname in msng_filenode_set:
1842 # Don't need this anymore, toss it to free memory.
1833 # Don't need this anymore, toss it to free memory.
1843 del msng_filenode_set[fname]
1834 del msng_filenode_set[fname]
1844 # Signal that no more groups are left.
1835 # Signal that no more groups are left.
1845 yield changegroup.closechunk()
1836 yield changegroup.closechunk()
1846
1837
1847 if msng_cl_lst:
1838 if msng_cl_lst:
1848 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1839 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1849
1840
1850 return util.chunkbuffer(gengroup())
1841 return util.chunkbuffer(gengroup())
1851
1842
1852 def changegroup(self, basenodes, source):
1843 def changegroup(self, basenodes, source):
1853 """Generate a changegroup of all nodes that we have that a recipient
1844 """Generate a changegroup of all nodes that we have that a recipient
1854 doesn't.
1845 doesn't.
1855
1846
1856 This is much easier than the previous function as we can assume that
1847 This is much easier than the previous function as we can assume that
1857 the recipient has any changenode we aren't sending them."""
1848 the recipient has any changenode we aren't sending them."""
1858
1849
1859 self.hook('preoutgoing', throw=True, source=source)
1850 self.hook('preoutgoing', throw=True, source=source)
1860
1851
1861 cl = self.changelog
1852 cl = self.changelog
1862 nodes = cl.nodesbetween(basenodes, None)[0]
1853 nodes = cl.nodesbetween(basenodes, None)[0]
1863 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1854 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1864 self.changegroupinfo(nodes, source)
1855 self.changegroupinfo(nodes, source)
1865
1856
1866 def identity(x):
1857 def identity(x):
1867 return x
1858 return x
1868
1859
1869 def gennodelst(log):
1860 def gennodelst(log):
1870 for r in log:
1861 for r in log:
1871 n = log.node(r)
1862 n = log.node(r)
1872 if log.linkrev(n) in revset:
1863 if log.linkrev(n) in revset:
1873 yield n
1864 yield n
1874
1865
1875 def changed_file_collector(changedfileset):
1866 def changed_file_collector(changedfileset):
1876 def collect_changed_files(clnode):
1867 def collect_changed_files(clnode):
1877 c = cl.read(clnode)
1868 c = cl.read(clnode)
1878 for fname in c[3]:
1869 for fname in c[3]:
1879 changedfileset[fname] = 1
1870 changedfileset[fname] = 1
1880 return collect_changed_files
1871 return collect_changed_files
1881
1872
1882 def lookuprevlink_func(revlog):
1873 def lookuprevlink_func(revlog):
1883 def lookuprevlink(n):
1874 def lookuprevlink(n):
1884 return cl.node(revlog.linkrev(n))
1875 return cl.node(revlog.linkrev(n))
1885 return lookuprevlink
1876 return lookuprevlink
1886
1877
1887 def gengroup():
1878 def gengroup():
1888 # construct a list of all changed files
1879 # construct a list of all changed files
1889 changedfiles = {}
1880 changedfiles = {}
1890
1881
1891 for chnk in cl.group(nodes, identity,
1882 for chnk in cl.group(nodes, identity,
1892 changed_file_collector(changedfiles)):
1883 changed_file_collector(changedfiles)):
1893 yield chnk
1884 yield chnk
1894
1885
1895 mnfst = self.manifest
1886 mnfst = self.manifest
1896 nodeiter = gennodelst(mnfst)
1887 nodeiter = gennodelst(mnfst)
1897 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1888 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1898 yield chnk
1889 yield chnk
1899
1890
1900 for fname in util.sort(changedfiles):
1891 for fname in util.sort(changedfiles):
1901 filerevlog = self.file(fname)
1892 filerevlog = self.file(fname)
1902 if not len(filerevlog):
1893 if not len(filerevlog):
1903 raise util.Abort(_("empty or missing revlog for %s") % fname)
1894 raise util.Abort(_("empty or missing revlog for %s") % fname)
1904 nodeiter = gennodelst(filerevlog)
1895 nodeiter = gennodelst(filerevlog)
1905 nodeiter = list(nodeiter)
1896 nodeiter = list(nodeiter)
1906 if nodeiter:
1897 if nodeiter:
1907 yield changegroup.chunkheader(len(fname))
1898 yield changegroup.chunkheader(len(fname))
1908 yield fname
1899 yield fname
1909 lookup = lookuprevlink_func(filerevlog)
1900 lookup = lookuprevlink_func(filerevlog)
1910 for chnk in filerevlog.group(nodeiter, lookup):
1901 for chnk in filerevlog.group(nodeiter, lookup):
1911 yield chnk
1902 yield chnk
1912
1903
1913 yield changegroup.closechunk()
1904 yield changegroup.closechunk()
1914
1905
1915 if nodes:
1906 if nodes:
1916 self.hook('outgoing', node=hex(nodes[0]), source=source)
1907 self.hook('outgoing', node=hex(nodes[0]), source=source)
1917
1908
1918 return util.chunkbuffer(gengroup())
1909 return util.chunkbuffer(gengroup())
1919
1910
1920 def addchangegroup(self, source, srctype, url, emptyok=False):
1911 def addchangegroup(self, source, srctype, url, emptyok=False):
1921 """add changegroup to repo.
1912 """add changegroup to repo.
1922
1913
1923 return values:
1914 return values:
1924 - nothing changed or no source: 0
1915 - nothing changed or no source: 0
1925 - more heads than before: 1+added heads (2..n)
1916 - more heads than before: 1+added heads (2..n)
1926 - less heads than before: -1-removed heads (-2..-n)
1917 - less heads than before: -1-removed heads (-2..-n)
1927 - number of heads stays the same: 1
1918 - number of heads stays the same: 1
1928 """
1919 """
1929 def csmap(x):
1920 def csmap(x):
1930 self.ui.debug(_("add changeset %s\n") % short(x))
1921 self.ui.debug(_("add changeset %s\n") % short(x))
1931 return len(cl)
1922 return len(cl)
1932
1923
1933 def revmap(x):
1924 def revmap(x):
1934 return cl.rev(x)
1925 return cl.rev(x)
1935
1926
1936 if not source:
1927 if not source:
1937 return 0
1928 return 0
1938
1929
1939 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1930 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1940
1931
1941 changesets = files = revisions = 0
1932 changesets = files = revisions = 0
1942
1933
1943 # write changelog data to temp files so concurrent readers will not see
1934 # write changelog data to temp files so concurrent readers will not see
1944 # inconsistent view
1935 # inconsistent view
1945 cl = self.changelog
1936 cl = self.changelog
1946 cl.delayupdate()
1937 cl.delayupdate()
1947 oldheads = len(cl.heads())
1938 oldheads = len(cl.heads())
1948
1939
1949 tr = self.transaction()
1940 tr = self.transaction()
1950 try:
1941 try:
1951 trp = weakref.proxy(tr)
1942 trp = weakref.proxy(tr)
1952 # pull off the changeset group
1943 # pull off the changeset group
1953 self.ui.status(_("adding changesets\n"))
1944 self.ui.status(_("adding changesets\n"))
1954 cor = len(cl) - 1
1945 cor = len(cl) - 1
1955 chunkiter = changegroup.chunkiter(source)
1946 chunkiter = changegroup.chunkiter(source)
1956 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1947 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1957 raise util.Abort(_("received changelog group is empty"))
1948 raise util.Abort(_("received changelog group is empty"))
1958 cnr = len(cl) - 1
1949 cnr = len(cl) - 1
1959 changesets = cnr - cor
1950 changesets = cnr - cor
1960
1951
1961 # pull off the manifest group
1952 # pull off the manifest group
1962 self.ui.status(_("adding manifests\n"))
1953 self.ui.status(_("adding manifests\n"))
1963 chunkiter = changegroup.chunkiter(source)
1954 chunkiter = changegroup.chunkiter(source)
1964 # no need to check for empty manifest group here:
1955 # no need to check for empty manifest group here:
1965 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1956 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1966 # no new manifest will be created and the manifest group will
1957 # no new manifest will be created and the manifest group will
1967 # be empty during the pull
1958 # be empty during the pull
1968 self.manifest.addgroup(chunkiter, revmap, trp)
1959 self.manifest.addgroup(chunkiter, revmap, trp)
1969
1960
1970 # process the files
1961 # process the files
1971 self.ui.status(_("adding file changes\n"))
1962 self.ui.status(_("adding file changes\n"))
1972 while 1:
1963 while 1:
1973 f = changegroup.getchunk(source)
1964 f = changegroup.getchunk(source)
1974 if not f:
1965 if not f:
1975 break
1966 break
1976 self.ui.debug(_("adding %s revisions\n") % f)
1967 self.ui.debug(_("adding %s revisions\n") % f)
1977 fl = self.file(f)
1968 fl = self.file(f)
1978 o = len(fl)
1969 o = len(fl)
1979 chunkiter = changegroup.chunkiter(source)
1970 chunkiter = changegroup.chunkiter(source)
1980 if fl.addgroup(chunkiter, revmap, trp) is None:
1971 if fl.addgroup(chunkiter, revmap, trp) is None:
1981 raise util.Abort(_("received file revlog group is empty"))
1972 raise util.Abort(_("received file revlog group is empty"))
1982 revisions += len(fl) - o
1973 revisions += len(fl) - o
1983 files += 1
1974 files += 1
1984
1975
1985 # make changelog see real files again
1976 # make changelog see real files again
1986 cl.finalize(trp)
1977 cl.finalize(trp)
1987
1978
1988 newheads = len(self.changelog.heads())
1979 newheads = len(self.changelog.heads())
1989 heads = ""
1980 heads = ""
1990 if oldheads and newheads != oldheads:
1981 if oldheads and newheads != oldheads:
1991 heads = _(" (%+d heads)") % (newheads - oldheads)
1982 heads = _(" (%+d heads)") % (newheads - oldheads)
1992
1983
1993 self.ui.status(_("added %d changesets"
1984 self.ui.status(_("added %d changesets"
1994 " with %d changes to %d files%s\n")
1985 " with %d changes to %d files%s\n")
1995 % (changesets, revisions, files, heads))
1986 % (changesets, revisions, files, heads))
1996
1987
1997 if changesets > 0:
1988 if changesets > 0:
1998 self.hook('pretxnchangegroup', throw=True,
1989 self.hook('pretxnchangegroup', throw=True,
1999 node=hex(self.changelog.node(cor+1)), source=srctype,
1990 node=hex(self.changelog.node(cor+1)), source=srctype,
2000 url=url)
1991 url=url)
2001
1992
2002 tr.close()
1993 tr.close()
2003 finally:
1994 finally:
2004 del tr
1995 del tr
2005
1996
2006 if changesets > 0:
1997 if changesets > 0:
2007 # forcefully update the on-disk branch cache
1998 # forcefully update the on-disk branch cache
2008 self.ui.debug(_("updating the branch cache\n"))
1999 self.ui.debug(_("updating the branch cache\n"))
2009 self.branchtags()
2000 self.branchtags()
2010 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2001 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2011 source=srctype, url=url)
2002 source=srctype, url=url)
2012
2003
2013 for i in xrange(cor + 1, cnr + 1):
2004 for i in xrange(cor + 1, cnr + 1):
2014 self.hook("incoming", node=hex(self.changelog.node(i)),
2005 self.hook("incoming", node=hex(self.changelog.node(i)),
2015 source=srctype, url=url)
2006 source=srctype, url=url)
2016
2007
2017 # never return 0 here:
2008 # never return 0 here:
2018 if newheads < oldheads:
2009 if newheads < oldheads:
2019 return newheads - oldheads - 1
2010 return newheads - oldheads - 1
2020 else:
2011 else:
2021 return newheads - oldheads + 1
2012 return newheads - oldheads + 1
2022
2013
2023
2014
2024 def stream_in(self, remote):
2015 def stream_in(self, remote):
2025 fp = remote.stream_out()
2016 fp = remote.stream_out()
2026 l = fp.readline()
2017 l = fp.readline()
2027 try:
2018 try:
2028 resp = int(l)
2019 resp = int(l)
2029 except ValueError:
2020 except ValueError:
2030 raise util.UnexpectedOutput(
2021 raise util.UnexpectedOutput(
2031 _('Unexpected response from remote server:'), l)
2022 _('Unexpected response from remote server:'), l)
2032 if resp == 1:
2023 if resp == 1:
2033 raise util.Abort(_('operation forbidden by server'))
2024 raise util.Abort(_('operation forbidden by server'))
2034 elif resp == 2:
2025 elif resp == 2:
2035 raise util.Abort(_('locking the remote repository failed'))
2026 raise util.Abort(_('locking the remote repository failed'))
2036 elif resp != 0:
2027 elif resp != 0:
2037 raise util.Abort(_('the server sent an unknown error code'))
2028 raise util.Abort(_('the server sent an unknown error code'))
2038 self.ui.status(_('streaming all changes\n'))
2029 self.ui.status(_('streaming all changes\n'))
2039 l = fp.readline()
2030 l = fp.readline()
2040 try:
2031 try:
2041 total_files, total_bytes = map(int, l.split(' ', 1))
2032 total_files, total_bytes = map(int, l.split(' ', 1))
2042 except (ValueError, TypeError):
2033 except (ValueError, TypeError):
2043 raise util.UnexpectedOutput(
2034 raise util.UnexpectedOutput(
2044 _('Unexpected response from remote server:'), l)
2035 _('Unexpected response from remote server:'), l)
2045 self.ui.status(_('%d files to transfer, %s of data\n') %
2036 self.ui.status(_('%d files to transfer, %s of data\n') %
2046 (total_files, util.bytecount(total_bytes)))
2037 (total_files, util.bytecount(total_bytes)))
2047 start = time.time()
2038 start = time.time()
2048 for i in xrange(total_files):
2039 for i in xrange(total_files):
2049 # XXX doesn't support '\n' or '\r' in filenames
2040 # XXX doesn't support '\n' or '\r' in filenames
2050 l = fp.readline()
2041 l = fp.readline()
2051 try:
2042 try:
2052 name, size = l.split('\0', 1)
2043 name, size = l.split('\0', 1)
2053 size = int(size)
2044 size = int(size)
2054 except ValueError, TypeError:
2045 except ValueError, TypeError:
2055 raise util.UnexpectedOutput(
2046 raise util.UnexpectedOutput(
2056 _('Unexpected response from remote server:'), l)
2047 _('Unexpected response from remote server:'), l)
2057 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2048 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2058 ofp = self.sopener(name, 'w')
2049 ofp = self.sopener(name, 'w')
2059 for chunk in util.filechunkiter(fp, limit=size):
2050 for chunk in util.filechunkiter(fp, limit=size):
2060 ofp.write(chunk)
2051 ofp.write(chunk)
2061 ofp.close()
2052 ofp.close()
2062 elapsed = time.time() - start
2053 elapsed = time.time() - start
2063 if elapsed <= 0:
2054 if elapsed <= 0:
2064 elapsed = 0.001
2055 elapsed = 0.001
2065 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2056 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2066 (util.bytecount(total_bytes), elapsed,
2057 (util.bytecount(total_bytes), elapsed,
2067 util.bytecount(total_bytes / elapsed)))
2058 util.bytecount(total_bytes / elapsed)))
2068 self.invalidate()
2059 self.invalidate()
2069 return len(self.heads()) + 1
2060 return len(self.heads()) + 1
2070
2061
2071 def clone(self, remote, heads=[], stream=False):
2062 def clone(self, remote, heads=[], stream=False):
2072 '''clone remote repository.
2063 '''clone remote repository.
2073
2064
2074 keyword arguments:
2065 keyword arguments:
2075 heads: list of revs to clone (forces use of pull)
2066 heads: list of revs to clone (forces use of pull)
2076 stream: use streaming clone if possible'''
2067 stream: use streaming clone if possible'''
2077
2068
2078 # now, all clients that can request uncompressed clones can
2069 # now, all clients that can request uncompressed clones can
2079 # read repo formats supported by all servers that can serve
2070 # read repo formats supported by all servers that can serve
2080 # them.
2071 # them.
2081
2072
2082 # if revlog format changes, client will have to check version
2073 # if revlog format changes, client will have to check version
2083 # and format flags on "stream" capability, and use
2074 # and format flags on "stream" capability, and use
2084 # uncompressed only if compatible.
2075 # uncompressed only if compatible.
2085
2076
2086 if stream and not heads and remote.capable('stream'):
2077 if stream and not heads and remote.capable('stream'):
2087 return self.stream_in(remote)
2078 return self.stream_in(remote)
2088 return self.pull(remote, heads)
2079 return self.pull(remote, heads)
2089
2080
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in files.

    The pairs are copied into fresh tuples up front, so the returned
    closure holds no reference back to the caller's original entries.
    """
    pairs = [tuple(entry) for entry in files]
    def renamer():
        for source, destination in pairs:
            util.rename(source, destination)
    return renamer
2097
2088
def instance(ui, path, create):
    """Build a localrepository for a local filesystem path.

    Any leading 'file' scheme is stripped from *path* before it is
    handed to the repository constructor.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2100
2091
def islocal(path):
    """Report whether *path* is local; for this repository class it
    unconditionally is."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now