add format.usefncache config option (default is true)...
Adrian Buehlmann
r7234:ae70fe61 default
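
The option is read at repository creation time: the "fncache" entry is written to .hg/requires only when the repository is created, so the setting has no effect on existing repositories. As a minimal illustration (not part of this changeset), a user could opt out of the new layout by disabling the option in an hgrc before running hg init:

    [format]
    # illustration: disable the fncache layout for newly created repositories
    # (takes effect only at creation time; default is True per this changeset)
    usefncache = False

Repositories created this way omit the 'fncache' requirement and, as the `supported` check below shows, stay readable by Mercurial versions that predate this format.
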
@@ -1,2126 +1,2127 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = util.set(('lookup', 'changegroupsubset'))
18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 supported = ('revlogv1', 'store', 'fncache')
19 supported = ('revlogv1', 'store', 'fncache')
20
20
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.root = os.path.realpath(path)
23 self.root = os.path.realpath(path)
24 self.path = os.path.join(self.root, ".hg")
24 self.path = os.path.join(self.root, ".hg")
25 self.origroot = path
25 self.origroot = path
26 self.opener = util.opener(self.path)
26 self.opener = util.opener(self.path)
27 self.wopener = util.opener(self.root)
27 self.wopener = util.opener(self.root)
28
28
29 if not os.path.isdir(self.path):
29 if not os.path.isdir(self.path):
30 if create:
30 if create:
31 if not os.path.exists(path):
31 if not os.path.exists(path):
32 os.mkdir(path)
32 os.mkdir(path)
33 os.mkdir(self.path)
33 os.mkdir(self.path)
34 requirements = ["revlogv1"]
34 requirements = ["revlogv1"]
35 if parentui.configbool('format', 'usestore', True):
35 if parentui.configbool('format', 'usestore', True):
36 os.mkdir(os.path.join(self.path, "store"))
36 os.mkdir(os.path.join(self.path, "store"))
37 requirements.append("store")
37 requirements.append("store")
38 requirements.append("fncache")
38 if parentui.configbool('format', 'usefncache', True):
39 requirements.append("fncache")
39 # create an invalid changelog
40 # create an invalid changelog
40 self.opener("00changelog.i", "a").write(
41 self.opener("00changelog.i", "a").write(
41 '\0\0\0\2' # represents revlogv2
42 '\0\0\0\2' # represents revlogv2
42 ' dummy changelog to prevent using the old repo layout'
43 ' dummy changelog to prevent using the old repo layout'
43 )
44 )
44 reqfile = self.opener("requires", "w")
45 reqfile = self.opener("requires", "w")
45 for r in requirements:
46 for r in requirements:
46 reqfile.write("%s\n" % r)
47 reqfile.write("%s\n" % r)
47 reqfile.close()
48 reqfile.close()
48 else:
49 else:
49 raise repo.RepoError(_("repository %s not found") % path)
50 raise repo.RepoError(_("repository %s not found") % path)
50 elif create:
51 elif create:
51 raise repo.RepoError(_("repository %s already exists") % path)
52 raise repo.RepoError(_("repository %s already exists") % path)
52 else:
53 else:
53 # find requirements
54 # find requirements
54 requirements = []
55 requirements = []
55 try:
56 try:
56 requirements = self.opener("requires").read().splitlines()
57 requirements = self.opener("requires").read().splitlines()
57 for r in requirements:
58 for r in requirements:
58 if r not in self.supported:
59 if r not in self.supported:
59 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 except IOError, inst:
61 except IOError, inst:
61 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
62 raise
63 raise
63
64
64 self.store = store.store(requirements, self.path, util.opener)
65 self.store = store.store(requirements, self.path, util.opener)
65 self.spath = self.store.path
66 self.spath = self.store.path
66 self.sopener = self.store.opener
67 self.sopener = self.store.opener
67 self.sjoin = self.store.join
68 self.sjoin = self.store.join
68 self.opener.createmode = self.store.createmode
69 self.opener.createmode = self.store.createmode
69
70
70 self.ui = ui.ui(parentui=parentui)
71 self.ui = ui.ui(parentui=parentui)
71 try:
72 try:
72 self.ui.readconfig(self.join("hgrc"), self.root)
73 self.ui.readconfig(self.join("hgrc"), self.root)
73 extensions.loadall(self.ui)
74 extensions.loadall(self.ui)
74 except IOError:
75 except IOError:
75 pass
76 pass
76
77
77 self.tagscache = None
78 self.tagscache = None
78 self._tagstypecache = None
79 self._tagstypecache = None
79 self.branchcache = None
80 self.branchcache = None
80 self._ubranchcache = None # UTF-8 version of branchcache
81 self._ubranchcache = None # UTF-8 version of branchcache
81 self._branchcachetip = None
82 self._branchcachetip = None
82 self.nodetagscache = None
83 self.nodetagscache = None
83 self.filterpats = {}
84 self.filterpats = {}
84 self._datafilters = {}
85 self._datafilters = {}
85 self._transref = self._lockref = self._wlockref = None
86 self._transref = self._lockref = self._wlockref = None
86
87
87 def __getattr__(self, name):
88 def __getattr__(self, name):
88 if name == 'changelog':
89 if name == 'changelog':
89 self.changelog = changelog.changelog(self.sopener)
90 self.changelog = changelog.changelog(self.sopener)
90 self.sopener.defversion = self.changelog.version
91 self.sopener.defversion = self.changelog.version
91 return self.changelog
92 return self.changelog
92 if name == 'manifest':
93 if name == 'manifest':
93 self.changelog
94 self.changelog
94 self.manifest = manifest.manifest(self.sopener)
95 self.manifest = manifest.manifest(self.sopener)
95 return self.manifest
96 return self.manifest
96 if name == 'dirstate':
97 if name == 'dirstate':
97 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 return self.dirstate
99 return self.dirstate
99 else:
100 else:
100 raise AttributeError(name)
101 raise AttributeError(name)
101
102
102 def __getitem__(self, changeid):
103 def __getitem__(self, changeid):
103 if changeid == None:
104 if changeid == None:
104 return context.workingctx(self)
105 return context.workingctx(self)
105 return context.changectx(self, changeid)
106 return context.changectx(self, changeid)
106
107
107 def __nonzero__(self):
108 def __nonzero__(self):
108 return True
109 return True
109
110
110 def __len__(self):
111 def __len__(self):
111 return len(self.changelog)
112 return len(self.changelog)
112
113
113 def __iter__(self):
114 def __iter__(self):
114 for i in xrange(len(self)):
115 for i in xrange(len(self)):
115 yield i
116 yield i
116
117
117 def url(self):
118 def url(self):
118 return 'file:' + self.root
119 return 'file:' + self.root
119
120
120 def hook(self, name, throw=False, **args):
121 def hook(self, name, throw=False, **args):
121 return hook.hook(self.ui, self, name, throw, **args)
122 return hook.hook(self.ui, self, name, throw, **args)
122
123
123 tag_disallowed = ':\r\n'
124 tag_disallowed = ':\r\n'
124
125
125 def _tag(self, names, node, message, local, user, date, parent=None,
126 def _tag(self, names, node, message, local, user, date, parent=None,
126 extra={}):
127 extra={}):
127 use_dirstate = parent is None
128 use_dirstate = parent is None
128
129
129 if isinstance(names, str):
130 if isinstance(names, str):
130 allchars = names
131 allchars = names
131 names = (names,)
132 names = (names,)
132 else:
133 else:
133 allchars = ''.join(names)
134 allchars = ''.join(names)
134 for c in self.tag_disallowed:
135 for c in self.tag_disallowed:
135 if c in allchars:
136 if c in allchars:
136 raise util.Abort(_('%r cannot be used in a tag name') % c)
137 raise util.Abort(_('%r cannot be used in a tag name') % c)
137
138
138 for name in names:
139 for name in names:
139 self.hook('pretag', throw=True, node=hex(node), tag=name,
140 self.hook('pretag', throw=True, node=hex(node), tag=name,
140 local=local)
141 local=local)
141
142
142 def writetags(fp, names, munge, prevtags):
143 def writetags(fp, names, munge, prevtags):
143 fp.seek(0, 2)
144 fp.seek(0, 2)
144 if prevtags and prevtags[-1] != '\n':
145 if prevtags and prevtags[-1] != '\n':
145 fp.write('\n')
146 fp.write('\n')
146 for name in names:
147 for name in names:
147 m = munge and munge(name) or name
148 m = munge and munge(name) or name
148 if self._tagstypecache and name in self._tagstypecache:
149 if self._tagstypecache and name in self._tagstypecache:
149 old = self.tagscache.get(name, nullid)
150 old = self.tagscache.get(name, nullid)
150 fp.write('%s %s\n' % (hex(old), m))
151 fp.write('%s %s\n' % (hex(old), m))
151 fp.write('%s %s\n' % (hex(node), m))
152 fp.write('%s %s\n' % (hex(node), m))
152 fp.close()
153 fp.close()
153
154
154 prevtags = ''
155 prevtags = ''
155 if local:
156 if local:
156 try:
157 try:
157 fp = self.opener('localtags', 'r+')
158 fp = self.opener('localtags', 'r+')
158 except IOError, err:
159 except IOError, err:
159 fp = self.opener('localtags', 'a')
160 fp = self.opener('localtags', 'a')
160 else:
161 else:
161 prevtags = fp.read()
162 prevtags = fp.read()
162
163
163 # local tags are stored in the current charset
164 # local tags are stored in the current charset
164 writetags(fp, names, None, prevtags)
165 writetags(fp, names, None, prevtags)
165 for name in names:
166 for name in names:
166 self.hook('tag', node=hex(node), tag=name, local=local)
167 self.hook('tag', node=hex(node), tag=name, local=local)
167 return
168 return
168
169
169 if use_dirstate:
170 if use_dirstate:
170 try:
171 try:
171 fp = self.wfile('.hgtags', 'rb+')
172 fp = self.wfile('.hgtags', 'rb+')
172 except IOError, err:
173 except IOError, err:
173 fp = self.wfile('.hgtags', 'ab')
174 fp = self.wfile('.hgtags', 'ab')
174 else:
175 else:
175 prevtags = fp.read()
176 prevtags = fp.read()
176 else:
177 else:
177 try:
178 try:
178 prevtags = self.filectx('.hgtags', parent).data()
179 prevtags = self.filectx('.hgtags', parent).data()
179 except revlog.LookupError:
180 except revlog.LookupError:
180 pass
181 pass
181 fp = self.wfile('.hgtags', 'wb')
182 fp = self.wfile('.hgtags', 'wb')
182 if prevtags:
183 if prevtags:
183 fp.write(prevtags)
184 fp.write(prevtags)
184
185
185 # committed tags are stored in UTF-8
186 # committed tags are stored in UTF-8
186 writetags(fp, names, util.fromlocal, prevtags)
187 writetags(fp, names, util.fromlocal, prevtags)
187
188
188 if use_dirstate and '.hgtags' not in self.dirstate:
189 if use_dirstate and '.hgtags' not in self.dirstate:
189 self.add(['.hgtags'])
190 self.add(['.hgtags'])
190
191
191 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
192 extra=extra)
193 extra=extra)
193
194
194 for name in names:
195 for name in names:
195 self.hook('tag', node=hex(node), tag=name, local=local)
196 self.hook('tag', node=hex(node), tag=name, local=local)
196
197
197 return tagnode
198 return tagnode
198
199
199 def tag(self, names, node, message, local, user, date):
200 def tag(self, names, node, message, local, user, date):
200 '''tag a revision with one or more symbolic names.
201 '''tag a revision with one or more symbolic names.
201
202
202 names is a list of strings or, when adding a single tag, names may be a
203 names is a list of strings or, when adding a single tag, names may be a
203 string.
204 string.
204
205
205 if local is True, the tags are stored in a per-repository file.
206 if local is True, the tags are stored in a per-repository file.
206 otherwise, they are stored in the .hgtags file, and a new
207 otherwise, they are stored in the .hgtags file, and a new
207 changeset is committed with the change.
208 changeset is committed with the change.
208
209
209 keyword arguments:
210 keyword arguments:
210
211
211 local: whether to store tags in non-version-controlled file
212 local: whether to store tags in non-version-controlled file
212 (default False)
213 (default False)
213
214
214 message: commit message to use if committing
215 message: commit message to use if committing
215
216
216 user: name of user to use if committing
217 user: name of user to use if committing
217
218
218 date: date tuple to use if committing'''
219 date: date tuple to use if committing'''
219
220
220 for x in self.status()[:5]:
221 for x in self.status()[:5]:
221 if '.hgtags' in x:
222 if '.hgtags' in x:
222 raise util.Abort(_('working copy of .hgtags is changed '
223 raise util.Abort(_('working copy of .hgtags is changed '
223 '(please commit .hgtags manually)'))
224 '(please commit .hgtags manually)'))
224
225
225 self._tag(names, node, message, local, user, date)
226 self._tag(names, node, message, local, user, date)
226
227
227 def tags(self):
228 def tags(self):
228 '''return a mapping of tag to node'''
229 '''return a mapping of tag to node'''
229 if self.tagscache:
230 if self.tagscache:
230 return self.tagscache
231 return self.tagscache
231
232
232 globaltags = {}
233 globaltags = {}
233 tagtypes = {}
234 tagtypes = {}
234
235
235 def readtags(lines, fn, tagtype):
236 def readtags(lines, fn, tagtype):
236 filetags = {}
237 filetags = {}
237 count = 0
238 count = 0
238
239
239 def warn(msg):
240 def warn(msg):
240 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
241
242
242 for l in lines:
243 for l in lines:
243 count += 1
244 count += 1
244 if not l:
245 if not l:
245 continue
246 continue
246 s = l.split(" ", 1)
247 s = l.split(" ", 1)
247 if len(s) != 2:
248 if len(s) != 2:
248 warn(_("cannot parse entry"))
249 warn(_("cannot parse entry"))
249 continue
250 continue
250 node, key = s
251 node, key = s
251 key = util.tolocal(key.strip()) # stored in UTF-8
252 key = util.tolocal(key.strip()) # stored in UTF-8
252 try:
253 try:
253 bin_n = bin(node)
254 bin_n = bin(node)
254 except TypeError:
255 except TypeError:
255 warn(_("node '%s' is not well formed") % node)
256 warn(_("node '%s' is not well formed") % node)
256 continue
257 continue
257 if bin_n not in self.changelog.nodemap:
258 if bin_n not in self.changelog.nodemap:
258 warn(_("tag '%s' refers to unknown node") % key)
259 warn(_("tag '%s' refers to unknown node") % key)
259 continue
260 continue
260
261
261 h = []
262 h = []
262 if key in filetags:
263 if key in filetags:
263 n, h = filetags[key]
264 n, h = filetags[key]
264 h.append(n)
265 h.append(n)
265 filetags[key] = (bin_n, h)
266 filetags[key] = (bin_n, h)
266
267
267 for k, nh in filetags.items():
268 for k, nh in filetags.items():
268 if k not in globaltags:
269 if k not in globaltags:
269 globaltags[k] = nh
270 globaltags[k] = nh
270 tagtypes[k] = tagtype
271 tagtypes[k] = tagtype
271 continue
272 continue
272
273
273 # we prefer the global tag if:
274 # we prefer the global tag if:
274 # it supercedes us OR
275 # it supercedes us OR
275 # mutual supercedes and it has a higher rank
276 # mutual supercedes and it has a higher rank
276 # otherwise we win because we're tip-most
277 # otherwise we win because we're tip-most
277 an, ah = nh
278 an, ah = nh
278 bn, bh = globaltags[k]
279 bn, bh = globaltags[k]
279 if (bn != an and an in bh and
280 if (bn != an and an in bh and
280 (bn not in ah or len(bh) > len(ah))):
281 (bn not in ah or len(bh) > len(ah))):
281 an = bn
282 an = bn
282 ah.extend([n for n in bh if n not in ah])
283 ah.extend([n for n in bh if n not in ah])
283 globaltags[k] = an, ah
284 globaltags[k] = an, ah
284 tagtypes[k] = tagtype
285 tagtypes[k] = tagtype
285
286
286 # read the tags file from each head, ending with the tip
287 # read the tags file from each head, ending with the tip
287 f = None
288 f = None
288 for rev, node, fnode in self._hgtagsnodes():
289 for rev, node, fnode in self._hgtagsnodes():
289 f = (f and f.filectx(fnode) or
290 f = (f and f.filectx(fnode) or
290 self.filectx('.hgtags', fileid=fnode))
291 self.filectx('.hgtags', fileid=fnode))
291 readtags(f.data().splitlines(), f, "global")
292 readtags(f.data().splitlines(), f, "global")
292
293
293 try:
294 try:
294 data = util.fromlocal(self.opener("localtags").read())
295 data = util.fromlocal(self.opener("localtags").read())
295 # localtags are stored in the local character set
296 # localtags are stored in the local character set
296 # while the internal tag table is stored in UTF-8
297 # while the internal tag table is stored in UTF-8
297 readtags(data.splitlines(), "localtags", "local")
298 readtags(data.splitlines(), "localtags", "local")
298 except IOError:
299 except IOError:
299 pass
300 pass
300
301
301 self.tagscache = {}
302 self.tagscache = {}
302 self._tagstypecache = {}
303 self._tagstypecache = {}
303 for k,nh in globaltags.items():
304 for k,nh in globaltags.items():
304 n = nh[0]
305 n = nh[0]
305 if n != nullid:
306 if n != nullid:
306 self.tagscache[k] = n
307 self.tagscache[k] = n
307 self._tagstypecache[k] = tagtypes[k]
308 self._tagstypecache[k] = tagtypes[k]
308 self.tagscache['tip'] = self.changelog.tip()
309 self.tagscache['tip'] = self.changelog.tip()
309 return self.tagscache
310 return self.tagscache
310
311
311 def tagtype(self, tagname):
312 def tagtype(self, tagname):
312 '''
313 '''
313 return the type of the given tag. result can be:
314 return the type of the given tag. result can be:
314
315
315 'local' : a local tag
316 'local' : a local tag
316 'global' : a global tag
317 'global' : a global tag
317 None : tag does not exist
318 None : tag does not exist
318 '''
319 '''
319
320
320 self.tags()
321 self.tags()
321
322
322 return self._tagstypecache.get(tagname)
323 return self._tagstypecache.get(tagname)
323
324
324 def _hgtagsnodes(self):
325 def _hgtagsnodes(self):
325 heads = self.heads()
326 heads = self.heads()
326 heads.reverse()
327 heads.reverse()
327 last = {}
328 last = {}
328 ret = []
329 ret = []
329 for node in heads:
330 for node in heads:
330 c = self[node]
331 c = self[node]
331 rev = c.rev()
332 rev = c.rev()
332 try:
333 try:
333 fnode = c.filenode('.hgtags')
334 fnode = c.filenode('.hgtags')
334 except revlog.LookupError:
335 except revlog.LookupError:
335 continue
336 continue
336 ret.append((rev, node, fnode))
337 ret.append((rev, node, fnode))
337 if fnode in last:
338 if fnode in last:
338 ret[last[fnode]] = None
339 ret[last[fnode]] = None
339 last[fnode] = len(ret) - 1
340 last[fnode] = len(ret) - 1
340 return [item for item in ret if item]
341 return [item for item in ret if item]
341
342
342 def tagslist(self):
343 def tagslist(self):
343 '''return a list of tags ordered by revision'''
344 '''return a list of tags ordered by revision'''
344 l = []
345 l = []
345 for t, n in self.tags().items():
346 for t, n in self.tags().items():
346 try:
347 try:
347 r = self.changelog.rev(n)
348 r = self.changelog.rev(n)
348 except:
349 except:
349 r = -2 # sort to the beginning of the list if unknown
350 r = -2 # sort to the beginning of the list if unknown
350 l.append((r, t, n))
351 l.append((r, t, n))
351 return [(t, n) for r, t, n in util.sort(l)]
352 return [(t, n) for r, t, n in util.sort(l)]
352
353
353 def nodetags(self, node):
354 def nodetags(self, node):
354 '''return the tags associated with a node'''
355 '''return the tags associated with a node'''
355 if not self.nodetagscache:
356 if not self.nodetagscache:
356 self.nodetagscache = {}
357 self.nodetagscache = {}
357 for t, n in self.tags().items():
358 for t, n in self.tags().items():
358 self.nodetagscache.setdefault(n, []).append(t)
359 self.nodetagscache.setdefault(n, []).append(t)
359 return self.nodetagscache.get(node, [])
360 return self.nodetagscache.get(node, [])
360
361
361 def _branchtags(self, partial, lrev):
362 def _branchtags(self, partial, lrev):
362 tiprev = len(self) - 1
363 tiprev = len(self) - 1
363 if lrev != tiprev:
364 if lrev != tiprev:
364 self._updatebranchcache(partial, lrev+1, tiprev+1)
365 self._updatebranchcache(partial, lrev+1, tiprev+1)
365 self._writebranchcache(partial, self.changelog.tip(), tiprev)
366 self._writebranchcache(partial, self.changelog.tip(), tiprev)
366
367
367 return partial
368 return partial
368
369
369 def branchtags(self):
370 def branchtags(self):
370 tip = self.changelog.tip()
371 tip = self.changelog.tip()
371 if self.branchcache is not None and self._branchcachetip == tip:
372 if self.branchcache is not None and self._branchcachetip == tip:
372 return self.branchcache
373 return self.branchcache
373
374
374 oldtip = self._branchcachetip
375 oldtip = self._branchcachetip
375 self._branchcachetip = tip
376 self._branchcachetip = tip
376 if self.branchcache is None:
377 if self.branchcache is None:
377 self.branchcache = {} # avoid recursion in changectx
378 self.branchcache = {} # avoid recursion in changectx
378 else:
379 else:
379 self.branchcache.clear() # keep using the same dict
380 self.branchcache.clear() # keep using the same dict
380 if oldtip is None or oldtip not in self.changelog.nodemap:
381 if oldtip is None or oldtip not in self.changelog.nodemap:
381 partial, last, lrev = self._readbranchcache()
382 partial, last, lrev = self._readbranchcache()
382 else:
383 else:
383 lrev = self.changelog.rev(oldtip)
384 lrev = self.changelog.rev(oldtip)
384 partial = self._ubranchcache
385 partial = self._ubranchcache
385
386
386 self._branchtags(partial, lrev)
387 self._branchtags(partial, lrev)
387
388
388 # the branch cache is stored on disk as UTF-8, but in the local
389 # the branch cache is stored on disk as UTF-8, but in the local
389 # charset internally
390 # charset internally
390 for k, v in partial.items():
391 for k, v in partial.items():
391 self.branchcache[util.tolocal(k)] = v
392 self.branchcache[util.tolocal(k)] = v
392 self._ubranchcache = partial
393 self._ubranchcache = partial
393 return self.branchcache
394 return self.branchcache
394
395
395 def _readbranchcache(self):
396 def _readbranchcache(self):
396 partial = {}
397 partial = {}
397 try:
398 try:
398 f = self.opener("branch.cache")
399 f = self.opener("branch.cache")
399 lines = f.read().split('\n')
400 lines = f.read().split('\n')
400 f.close()
401 f.close()
401 except (IOError, OSError):
402 except (IOError, OSError):
402 return {}, nullid, nullrev
403 return {}, nullid, nullrev
403
404
404 try:
405 try:
405 last, lrev = lines.pop(0).split(" ", 1)
406 last, lrev = lines.pop(0).split(" ", 1)
406 last, lrev = bin(last), int(lrev)
407 last, lrev = bin(last), int(lrev)
407 if lrev >= len(self) or self[lrev].node() != last:
408 if lrev >= len(self) or self[lrev].node() != last:
408 # invalidate the cache
409 # invalidate the cache
409 raise ValueError('invalidating branch cache (tip differs)')
410 raise ValueError('invalidating branch cache (tip differs)')
410 for l in lines:
411 for l in lines:
411 if not l: continue
412 if not l: continue
412 node, label = l.split(" ", 1)
413 node, label = l.split(" ", 1)
413 partial[label.strip()] = bin(node)
414 partial[label.strip()] = bin(node)
414 except (KeyboardInterrupt, util.SignalInterrupt):
415 except (KeyboardInterrupt, util.SignalInterrupt):
415 raise
416 raise
416 except Exception, inst:
417 except Exception, inst:
417 if self.ui.debugflag:
418 if self.ui.debugflag:
418 self.ui.warn(str(inst), '\n')
419 self.ui.warn(str(inst), '\n')
419 partial, last, lrev = {}, nullid, nullrev
420 partial, last, lrev = {}, nullid, nullrev
420 return partial, last, lrev
421 return partial, last, lrev
421
422
422 def _writebranchcache(self, branches, tip, tiprev):
423 def _writebranchcache(self, branches, tip, tiprev):
423 try:
424 try:
424 f = self.opener("branch.cache", "w", atomictemp=True)
425 f = self.opener("branch.cache", "w", atomictemp=True)
425 f.write("%s %s\n" % (hex(tip), tiprev))
426 f.write("%s %s\n" % (hex(tip), tiprev))
426 for label, node in branches.iteritems():
427 for label, node in branches.iteritems():
427 f.write("%s %s\n" % (hex(node), label))
428 f.write("%s %s\n" % (hex(node), label))
428 f.rename()
429 f.rename()
429 except (IOError, OSError):
430 except (IOError, OSError):
430 pass
431 pass
431
432
432 def _updatebranchcache(self, partial, start, end):
433 def _updatebranchcache(self, partial, start, end):
433 for r in xrange(start, end):
434 for r in xrange(start, end):
434 c = self[r]
435 c = self[r]
435 b = c.branch()
436 b = c.branch()
436 partial[b] = c.node()
437 partial[b] = c.node()
437
438
438 def lookup(self, key):
439 def lookup(self, key):
439 if key == '.':
440 if key == '.':
440 return self.dirstate.parents()[0]
441 return self.dirstate.parents()[0]
441 elif key == 'null':
442 elif key == 'null':
442 return nullid
443 return nullid
443 n = self.changelog._match(key)
444 n = self.changelog._match(key)
444 if n:
445 if n:
445 return n
446 return n
446 if key in self.tags():
447 if key in self.tags():
447 return self.tags()[key]
448 return self.tags()[key]
448 if key in self.branchtags():
449 if key in self.branchtags():
449 return self.branchtags()[key]
450 return self.branchtags()[key]
450 n = self.changelog._partialmatch(key)
451 n = self.changelog._partialmatch(key)
451 if n:
452 if n:
452 return n
453 return n
453 try:
454 try:
454 if len(key) == 20:
455 if len(key) == 20:
455 key = hex(key)
456 key = hex(key)
456 except:
457 except:
457 pass
458 pass
458 raise repo.RepoError(_("unknown revision '%s'") % key)
459 raise repo.RepoError(_("unknown revision '%s'") % key)
459
460
460 def local(self):
461 def local(self):
461 return True
462 return True
462
463
463 def join(self, f):
464 def join(self, f):
464 return os.path.join(self.path, f)
465 return os.path.join(self.path, f)
465
466
466 def wjoin(self, f):
467 def wjoin(self, f):
467 return os.path.join(self.root, f)
468 return os.path.join(self.root, f)
468
469
469 def rjoin(self, f):
470 def rjoin(self, f):
470 return os.path.join(self.root, util.pconvert(f))
471 return os.path.join(self.root, util.pconvert(f))
471
472
472 def file(self, f):
473 def file(self, f):
473 if f[0] == '/':
474 if f[0] == '/':
474 f = f[1:]
475 f = f[1:]
475 return filelog.filelog(self.sopener, f)
476 return filelog.filelog(self.sopener, f)
476
477
477 def changectx(self, changeid):
478 def changectx(self, changeid):
478 return self[changeid]
479 return self[changeid]
479
480
480 def parents(self, changeid=None):
481 def parents(self, changeid=None):
481 '''get list of changectxs for parents of changeid'''
482 '''get list of changectxs for parents of changeid'''
482 return self[changeid].parents()
483 return self[changeid].parents()
483
484
484 def filectx(self, path, changeid=None, fileid=None):
485 def filectx(self, path, changeid=None, fileid=None):
485 """changeid can be a changeset revision, node, or tag.
486 """changeid can be a changeset revision, node, or tag.
486 fileid can be a file revision or node."""
487 fileid can be a file revision or node."""
487 return context.filectx(self, path, changeid, fileid)
488 return context.filectx(self, path, changeid, fileid)
488
489
489 def getcwd(self):
490 def getcwd(self):
490 return self.dirstate.getcwd()
491 return self.dirstate.getcwd()
491
492
492 def pathto(self, f, cwd=None):
493 def pathto(self, f, cwd=None):
493 return self.dirstate.pathto(f, cwd)
494 return self.dirstate.pathto(f, cwd)
494
495
495 def wfile(self, f, mode='r'):
496 def wfile(self, f, mode='r'):
496 return self.wopener(f, mode)
497 return self.wopener(f, mode)
497
498
498 def _link(self, f):
499 def _link(self, f):
499 return os.path.islink(self.wjoin(f))
500 return os.path.islink(self.wjoin(f))
500
501
501 def _filter(self, filter, filename, data):
502 def _filter(self, filter, filename, data):
502 if filter not in self.filterpats:
503 if filter not in self.filterpats:
503 l = []
504 l = []
504 for pat, cmd in self.ui.configitems(filter):
505 for pat, cmd in self.ui.configitems(filter):
505 if cmd == '!':
506 if cmd == '!':
506 continue
507 continue
507 mf = util.matcher(self.root, "", [pat], [], [])[1]
508 mf = util.matcher(self.root, "", [pat], [], [])[1]
508 fn = None
509 fn = None
509 params = cmd
510 params = cmd
510 for name, filterfn in self._datafilters.iteritems():
511 for name, filterfn in self._datafilters.iteritems():
511 if cmd.startswith(name):
512 if cmd.startswith(name):
512 fn = filterfn
513 fn = filterfn
513 params = cmd[len(name):].lstrip()
514 params = cmd[len(name):].lstrip()
514 break
515 break
515 if not fn:
516 if not fn:
516 fn = lambda s, c, **kwargs: util.filter(s, c)
517 fn = lambda s, c, **kwargs: util.filter(s, c)
517 # Wrap old filters not supporting keyword arguments
518 # Wrap old filters not supporting keyword arguments
518 if not inspect.getargspec(fn)[2]:
519 if not inspect.getargspec(fn)[2]:
519 oldfn = fn
520 oldfn = fn
520 fn = lambda s, c, **kwargs: oldfn(s, c)
521 fn = lambda s, c, **kwargs: oldfn(s, c)
521 l.append((mf, fn, params))
522 l.append((mf, fn, params))
522 self.filterpats[filter] = l
523 self.filterpats[filter] = l
523
524
524 for mf, fn, cmd in self.filterpats[filter]:
525 for mf, fn, cmd in self.filterpats[filter]:
525 if mf(filename):
526 if mf(filename):
526 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
527 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
528 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
528 break
529 break
529
530
530 return data
531 return data
531
532
532 def adddatafilter(self, name, filter):
533 def adddatafilter(self, name, filter):
533 self._datafilters[name] = filter
534 self._datafilters[name] = filter
534
535
535 def wread(self, filename):
536 def wread(self, filename):
536 if self._link(filename):
537 if self._link(filename):
537 data = os.readlink(self.wjoin(filename))
538 data = os.readlink(self.wjoin(filename))
538 else:
539 else:
539 data = self.wopener(filename, 'r').read()
540 data = self.wopener(filename, 'r').read()
540 return self._filter("encode", filename, data)
541 return self._filter("encode", filename, data)
541
542
542 def wwrite(self, filename, data, flags):
543 def wwrite(self, filename, data, flags):
543 data = self._filter("decode", filename, data)
544 data = self._filter("decode", filename, data)
544 try:
545 try:
545 os.unlink(self.wjoin(filename))
546 os.unlink(self.wjoin(filename))
546 except OSError:
547 except OSError:
547 pass
548 pass
548 if 'l' in flags:
549 if 'l' in flags:
549 self.wopener.symlink(data, filename)
550 self.wopener.symlink(data, filename)
550 else:
551 else:
551 self.wopener(filename, 'w').write(data)
552 self.wopener(filename, 'w').write(data)
552 if 'x' in flags:
553 if 'x' in flags:
553 util.set_flags(self.wjoin(filename), False, True)
554 util.set_flags(self.wjoin(filename), False, True)
554
555
555 def wwritedata(self, filename, data):
556 def wwritedata(self, filename, data):
556 return self._filter("decode", filename, data)
557 return self._filter("decode", filename, data)
557
558
558 def transaction(self):
559 def transaction(self):
559 if self._transref and self._transref():
560 if self._transref and self._transref():
560 return self._transref().nest()
561 return self._transref().nest()
561
562
562 # abort here if the journal already exists
563 # abort here if the journal already exists
563 if os.path.exists(self.sjoin("journal")):
564 if os.path.exists(self.sjoin("journal")):
564 raise repo.RepoError(_("journal already exists - run hg recover"))
565 raise repo.RepoError(_("journal already exists - run hg recover"))
565
566
566 # save dirstate for rollback
567 # save dirstate for rollback
567 try:
568 try:
568 ds = self.opener("dirstate").read()
569 ds = self.opener("dirstate").read()
569 except IOError:
570 except IOError:
570 ds = ""
571 ds = ""
571 self.opener("journal.dirstate", "w").write(ds)
572 self.opener("journal.dirstate", "w").write(ds)
572 self.opener("journal.branch", "w").write(self.dirstate.branch())
573 self.opener("journal.branch", "w").write(self.dirstate.branch())
573
574
574 renames = [(self.sjoin("journal"), self.sjoin("undo")),
575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
575 (self.join("journal.dirstate"), self.join("undo.dirstate")),
576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
576 (self.join("journal.branch"), self.join("undo.branch"))]
577 (self.join("journal.branch"), self.join("undo.branch"))]
577 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
578 self.sjoin("journal"),
579 self.sjoin("journal"),
579 aftertrans(renames),
580 aftertrans(renames),
580 self.store.createmode)
581 self.store.createmode)
581 self._transref = weakref.ref(tr)
582 self._transref = weakref.ref(tr)
582 return tr
583 return tr
583
584
584 def recover(self):
585 def recover(self):
585 l = self.lock()
586 l = self.lock()
586 try:
587 try:
587 if os.path.exists(self.sjoin("journal")):
588 if os.path.exists(self.sjoin("journal")):
588 self.ui.status(_("rolling back interrupted transaction\n"))
589 self.ui.status(_("rolling back interrupted transaction\n"))
589 transaction.rollback(self.sopener, self.sjoin("journal"))
590 transaction.rollback(self.sopener, self.sjoin("journal"))
590 self.invalidate()
591 self.invalidate()
591 return True
592 return True
592 else:
593 else:
593 self.ui.warn(_("no interrupted transaction available\n"))
594 self.ui.warn(_("no interrupted transaction available\n"))
594 return False
595 return False
595 finally:
596 finally:
596 del l
597 del l
597
598
598 def rollback(self):
599 def rollback(self):
599 wlock = lock = None
600 wlock = lock = None
600 try:
601 try:
601 wlock = self.wlock()
602 wlock = self.wlock()
602 lock = self.lock()
603 lock = self.lock()
603 if os.path.exists(self.sjoin("undo")):
604 if os.path.exists(self.sjoin("undo")):
604 self.ui.status(_("rolling back last transaction\n"))
605 self.ui.status(_("rolling back last transaction\n"))
605 transaction.rollback(self.sopener, self.sjoin("undo"))
606 transaction.rollback(self.sopener, self.sjoin("undo"))
606 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
607 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
607 try:
608 try:
608 branch = self.opener("undo.branch").read()
609 branch = self.opener("undo.branch").read()
609 self.dirstate.setbranch(branch)
610 self.dirstate.setbranch(branch)
610 except IOError:
611 except IOError:
611 self.ui.warn(_("Named branch could not be reset, "
612 self.ui.warn(_("Named branch could not be reset, "
612 "current branch still is: %s\n")
613 "current branch still is: %s\n")
613 % util.tolocal(self.dirstate.branch()))
614 % util.tolocal(self.dirstate.branch()))
614 self.invalidate()
615 self.invalidate()
615 self.dirstate.invalidate()
616 self.dirstate.invalidate()
616 else:
617 else:
617 self.ui.warn(_("no rollback information available\n"))
618 self.ui.warn(_("no rollback information available\n"))
618 finally:
619 finally:
619 del lock, wlock
620 del lock, wlock
620
621
621 def invalidate(self):
622 def invalidate(self):
622 for a in "changelog manifest".split():
623 for a in "changelog manifest".split():
623 if a in self.__dict__:
624 if a in self.__dict__:
624 delattr(self, a)
625 delattr(self, a)
625 self.tagscache = None
626 self.tagscache = None
626 self._tagstypecache = None
627 self._tagstypecache = None
627 self.nodetagscache = None
628 self.nodetagscache = None
628 self.branchcache = None
629 self.branchcache = None
629 self._ubranchcache = None
630 self._ubranchcache = None
630 self._branchcachetip = None
631 self._branchcachetip = None
631
632
632 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
633 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
633 try:
634 try:
634 l = lock.lock(lockname, 0, releasefn, desc=desc)
635 l = lock.lock(lockname, 0, releasefn, desc=desc)
635 except lock.LockHeld, inst:
636 except lock.LockHeld, inst:
636 if not wait:
637 if not wait:
637 raise
638 raise
638 self.ui.warn(_("waiting for lock on %s held by %r\n") %
639 self.ui.warn(_("waiting for lock on %s held by %r\n") %
639 (desc, inst.locker))
640 (desc, inst.locker))
640 # default to 600 seconds timeout
641 # default to 600 seconds timeout
641 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
642 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
642 releasefn, desc=desc)
643 releasefn, desc=desc)
643 if acquirefn:
644 if acquirefn:
644 acquirefn()
645 acquirefn()
645 return l
646 return l
646
647
647 def lock(self, wait=True):
648 def lock(self, wait=True):
648 if self._lockref and self._lockref():
649 if self._lockref and self._lockref():
649 return self._lockref()
650 return self._lockref()
650
651
651 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
652 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
652 _('repository %s') % self.origroot)
653 _('repository %s') % self.origroot)
653 self._lockref = weakref.ref(l)
654 self._lockref = weakref.ref(l)
654 return l
655 return l
655
656
656 def wlock(self, wait=True):
657 def wlock(self, wait=True):
657 if self._wlockref and self._wlockref():
658 if self._wlockref and self._wlockref():
658 return self._wlockref()
659 return self._wlockref()
659
660
660 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
661 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
661 self.dirstate.invalidate, _('working directory of %s') %
662 self.dirstate.invalidate, _('working directory of %s') %
662 self.origroot)
663 self.origroot)
663 self._wlockref = weakref.ref(l)
664 self._wlockref = weakref.ref(l)
664 return l
665 return l
665
666
666 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
667 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
667 """
668 """
668 commit an individual file as part of a larger transaction
669 commit an individual file as part of a larger transaction
669 """
670 """
670
671
671 fn = fctx.path()
672 fn = fctx.path()
672 t = fctx.data()
673 t = fctx.data()
673 fl = self.file(fn)
674 fl = self.file(fn)
674 fp1 = manifest1.get(fn, nullid)
675 fp1 = manifest1.get(fn, nullid)
675 fp2 = manifest2.get(fn, nullid)
676 fp2 = manifest2.get(fn, nullid)
676
677
677 meta = {}
678 meta = {}
678 cp = fctx.renamed()
679 cp = fctx.renamed()
679 if cp and cp[0] != fn:
680 if cp and cp[0] != fn:
680 # Mark the new revision of this file as a copy of another
681 # Mark the new revision of this file as a copy of another
681 # file. This copy data will effectively act as a parent
682 # file. This copy data will effectively act as a parent
682 # of this new revision. If this is a merge, the first
683 # of this new revision. If this is a merge, the first
683 # parent will be the nullid (meaning "look up the copy data")
684 # parent will be the nullid (meaning "look up the copy data")
684 # and the second one will be the other parent. For example:
685 # and the second one will be the other parent. For example:
685 #
686 #
686 # 0 --- 1 --- 3 rev1 changes file foo
687 # 0 --- 1 --- 3 rev1 changes file foo
687 # \ / rev2 renames foo to bar and changes it
688 # \ / rev2 renames foo to bar and changes it
688 # \- 2 -/ rev3 should have bar with all changes and
689 # \- 2 -/ rev3 should have bar with all changes and
689 # should record that bar descends from
690 # should record that bar descends from
690 # bar in rev2 and foo in rev1
691 # bar in rev2 and foo in rev1
691 #
692 #
692 # this allows this merge to succeed:
693 # this allows this merge to succeed:
693 #
694 #
694 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
695 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
695 # \ / merging rev3 and rev4 should use bar@rev2
696 # \ / merging rev3 and rev4 should use bar@rev2
696 # \- 2 --- 4 as the merge base
697 # \- 2 --- 4 as the merge base
697 #
698 #
698
699
699 cf = cp[0]
700 cf = cp[0]
700 cr = manifest1.get(cf)
701 cr = manifest1.get(cf)
701 nfp = fp2
702 nfp = fp2
702
703
703 if manifest2: # branch merge
704 if manifest2: # branch merge
704 if fp2 == nullid: # copied on remote side
705 if fp2 == nullid: # copied on remote side
705 if fp1 != nullid or cf in manifest2:
706 if fp1 != nullid or cf in manifest2:
706 cr = manifest2[cf]
707 cr = manifest2[cf]
707 nfp = fp1
708 nfp = fp1
708
709
709 # find source in nearest ancestor if we've lost track
710 # find source in nearest ancestor if we've lost track
710 if not cr:
711 if not cr:
711 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
712 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
712 (fn, cf))
713 (fn, cf))
713 for a in self['.'].ancestors():
714 for a in self['.'].ancestors():
714 if cf in a:
715 if cf in a:
715 cr = a[cf].filenode()
716 cr = a[cf].filenode()
716 break
717 break
717
718
718 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
719 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
719 meta["copy"] = cf
720 meta["copy"] = cf
720 meta["copyrev"] = hex(cr)
721 meta["copyrev"] = hex(cr)
721 fp1, fp2 = nullid, nfp
722 fp1, fp2 = nullid, nfp
722 elif fp2 != nullid:
723 elif fp2 != nullid:
723 # is one parent an ancestor of the other?
724 # is one parent an ancestor of the other?
724 fpa = fl.ancestor(fp1, fp2)
725 fpa = fl.ancestor(fp1, fp2)
725 if fpa == fp1:
726 if fpa == fp1:
726 fp1, fp2 = fp2, nullid
727 fp1, fp2 = fp2, nullid
727 elif fpa == fp2:
728 elif fpa == fp2:
728 fp2 = nullid
729 fp2 = nullid
729
730
730 # is the file unmodified from the parent? report existing entry
731 # is the file unmodified from the parent? report existing entry
731 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
732 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
732 return fp1
733 return fp1
733
734
734 changelist.append(fn)
735 changelist.append(fn)
735 return fl.add(t, meta, tr, linkrev, fp1, fp2)
736 return fl.add(t, meta, tr, linkrev, fp1, fp2)
736
737
737 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
738 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
738 if p1 is None:
739 if p1 is None:
739 p1, p2 = self.dirstate.parents()
740 p1, p2 = self.dirstate.parents()
740 return self.commit(files=files, text=text, user=user, date=date,
741 return self.commit(files=files, text=text, user=user, date=date,
741 p1=p1, p2=p2, extra=extra, empty_ok=True)
742 p1=p1, p2=p2, extra=extra, empty_ok=True)
742
743
743 def commit(self, files=None, text="", user=None, date=None,
744 def commit(self, files=None, text="", user=None, date=None,
744 match=None, force=False, force_editor=False,
745 match=None, force=False, force_editor=False,
745 p1=None, p2=None, extra={}, empty_ok=False):
746 p1=None, p2=None, extra={}, empty_ok=False):
746 wlock = lock = None
747 wlock = lock = None
747 if files:
748 if files:
748 files = util.unique(files)
749 files = util.unique(files)
749 try:
750 try:
750 wlock = self.wlock()
751 wlock = self.wlock()
751 lock = self.lock()
752 lock = self.lock()
752 use_dirstate = (p1 is None) # not rawcommit
753 use_dirstate = (p1 is None) # not rawcommit
753
754
754 if use_dirstate:
755 if use_dirstate:
755 p1, p2 = self.dirstate.parents()
756 p1, p2 = self.dirstate.parents()
756 update_dirstate = True
757 update_dirstate = True
757
758
758 if (not force and p2 != nullid and
759 if (not force and p2 != nullid and
759 (match and (match.files() or match.anypats()))):
760 (match and (match.files() or match.anypats()))):
760 raise util.Abort(_('cannot partially commit a merge '
761 raise util.Abort(_('cannot partially commit a merge '
761 '(do not specify files or patterns)'))
762 '(do not specify files or patterns)'))
762
763
763 if files:
764 if files:
764 modified, removed = [], []
765 modified, removed = [], []
765 for f in files:
766 for f in files:
766 s = self.dirstate[f]
767 s = self.dirstate[f]
767 if s in 'nma':
768 if s in 'nma':
768 modified.append(f)
769 modified.append(f)
769 elif s == 'r':
770 elif s == 'r':
770 removed.append(f)
771 removed.append(f)
771 else:
772 else:
772 self.ui.warn(_("%s not tracked!\n") % f)
773 self.ui.warn(_("%s not tracked!\n") % f)
773 changes = [modified, [], removed, [], []]
774 changes = [modified, [], removed, [], []]
774 else:
775 else:
775 changes = self.status(match=match)
776 changes = self.status(match=match)
776 else:
777 else:
777 p1, p2 = p1, p2 or nullid
778 p1, p2 = p1, p2 or nullid
778 update_dirstate = (self.dirstate.parents()[0] == p1)
779 update_dirstate = (self.dirstate.parents()[0] == p1)
779 changes = [files, [], [], [], []]
780 changes = [files, [], [], [], []]
780
781
781 ms = merge_.mergestate(self)
782 ms = merge_.mergestate(self)
782 for f in changes[0]:
783 for f in changes[0]:
783 if f in ms and ms[f] == 'u':
784 if f in ms and ms[f] == 'u':
784 raise util.Abort(_("unresolved merge conflicts "
785 raise util.Abort(_("unresolved merge conflicts "
785 "(see hg resolve)"))
786 "(see hg resolve)"))
786 wctx = context.workingctx(self, (p1, p2), text, user, date,
787 wctx = context.workingctx(self, (p1, p2), text, user, date,
787 extra, changes)
788 extra, changes)
788 return self._commitctx(wctx, force, force_editor, empty_ok,
789 return self._commitctx(wctx, force, force_editor, empty_ok,
789 use_dirstate, update_dirstate)
790 use_dirstate, update_dirstate)
790 finally:
791 finally:
791 del lock, wlock
792 del lock, wlock
792
793
793 def commitctx(self, ctx):
794 def commitctx(self, ctx):
794 """Add a new revision to current repository.
795 """Add a new revision to current repository.
795
796
796 Revision information is passed in the context.memctx argument.
797 Revision information is passed in the context.memctx argument.
797 commitctx() does not touch the working directory.
798 commitctx() does not touch the working directory.
798 """
799 """
799 wlock = lock = None
800 wlock = lock = None
800 try:
801 try:
801 wlock = self.wlock()
802 wlock = self.wlock()
802 lock = self.lock()
803 lock = self.lock()
803 return self._commitctx(ctx, force=True, force_editor=False,
804 return self._commitctx(ctx, force=True, force_editor=False,
804 empty_ok=True, use_dirstate=False,
805 empty_ok=True, use_dirstate=False,
805 update_dirstate=False)
806 update_dirstate=False)
806 finally:
807 finally:
807 del lock, wlock
808 del lock, wlock
808
809
809 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
810 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
810 use_dirstate=True, update_dirstate=True):
811 use_dirstate=True, update_dirstate=True):
811 tr = None
812 tr = None
812 valid = 0 # don't save the dirstate if this isn't set
813 valid = 0 # don't save the dirstate if this isn't set
813 try:
814 try:
814 commit = util.sort(wctx.modified() + wctx.added())
815 commit = util.sort(wctx.modified() + wctx.added())
815 remove = wctx.removed()
816 remove = wctx.removed()
816 extra = wctx.extra().copy()
817 extra = wctx.extra().copy()
817 branchname = extra['branch']
818 branchname = extra['branch']
818 user = wctx.user()
819 user = wctx.user()
819 text = wctx.description()
820 text = wctx.description()
820
821
821 p1, p2 = [p.node() for p in wctx.parents()]
822 p1, p2 = [p.node() for p in wctx.parents()]
822 c1 = self.changelog.read(p1)
823 c1 = self.changelog.read(p1)
823 c2 = self.changelog.read(p2)
824 c2 = self.changelog.read(p2)
824 m1 = self.manifest.read(c1[0]).copy()
825 m1 = self.manifest.read(c1[0]).copy()
825 m2 = self.manifest.read(c2[0])
826 m2 = self.manifest.read(c2[0])
826
827
827 if use_dirstate:
828 if use_dirstate:
828 oldname = c1[5].get("branch") # stored in UTF-8
829 oldname = c1[5].get("branch") # stored in UTF-8
829 if (not commit and not remove and not force and p2 == nullid
830 if (not commit and not remove and not force and p2 == nullid
830 and branchname == oldname):
831 and branchname == oldname):
831 self.ui.status(_("nothing changed\n"))
832 self.ui.status(_("nothing changed\n"))
832 return None
833 return None
833
834
834 xp1 = hex(p1)
835 xp1 = hex(p1)
835 if p2 == nullid: xp2 = ''
836 if p2 == nullid: xp2 = ''
836 else: xp2 = hex(p2)
837 else: xp2 = hex(p2)
837
838
838 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
839 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
839
840
840 tr = self.transaction()
841 tr = self.transaction()
841 trp = weakref.proxy(tr)
842 trp = weakref.proxy(tr)
842
843
843 # check in files
844 # check in files
844 new = {}
845 new = {}
845 changed = []
846 changed = []
846 linkrev = len(self)
847 linkrev = len(self)
847 for f in commit:
848 for f in commit:
848 self.ui.note(f + "\n")
849 self.ui.note(f + "\n")
849 try:
850 try:
850 fctx = wctx.filectx(f)
851 fctx = wctx.filectx(f)
851 newflags = fctx.flags()
852 newflags = fctx.flags()
852 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
853 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
853 if ((not changed or changed[-1] != f) and
854 if ((not changed or changed[-1] != f) and
854 m2.get(f) != new[f]):
855 m2.get(f) != new[f]):
855 # mention the file in the changelog if some
856 # mention the file in the changelog if some
856 # flag changed, even if there was no content
857 # flag changed, even if there was no content
857 # change.
858 # change.
858 if m1.flags(f) != newflags:
859 if m1.flags(f) != newflags:
859 changed.append(f)
860 changed.append(f)
860 m1.set(f, newflags)
861 m1.set(f, newflags)
861 if use_dirstate:
862 if use_dirstate:
862 self.dirstate.normal(f)
863 self.dirstate.normal(f)
863
864
864 except (OSError, IOError):
865 except (OSError, IOError):
865 if use_dirstate:
866 if use_dirstate:
866 self.ui.warn(_("trouble committing %s!\n") % f)
867 self.ui.warn(_("trouble committing %s!\n") % f)
867 raise
868 raise
868 else:
869 else:
869 remove.append(f)
870 remove.append(f)
870
871
871 updated, added = [], []
872 updated, added = [], []
872 for f in util.sort(changed):
873 for f in util.sort(changed):
873 if f in m1 or f in m2:
874 if f in m1 or f in m2:
874 updated.append(f)
875 updated.append(f)
875 else:
876 else:
876 added.append(f)
877 added.append(f)
877
878
878 # update manifest
879 # update manifest
879 m1.update(new)
880 m1.update(new)
880 removed = []
881 removed = []
881
882
882 for f in util.sort(remove):
883 for f in util.sort(remove):
883 if f in m1:
884 if f in m1:
884 del m1[f]
885 del m1[f]
885 removed.append(f)
886 removed.append(f)
886 elif f in m2:
887 elif f in m2:
887 removed.append(f)
888 removed.append(f)
888 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
889 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
889 (new, removed))
890 (new, removed))
890
891
891 # add changeset
892 # add changeset
892 if (not empty_ok and not text) or force_editor:
893 if (not empty_ok and not text) or force_editor:
893 edittext = []
894 edittext = []
894 if text:
895 if text:
895 edittext.append(text)
896 edittext.append(text)
896 edittext.append("")
897 edittext.append("")
897 edittext.append("") # Empty line between message and comments.
898 edittext.append("") # Empty line between message and comments.
898 edittext.append(_("HG: Enter commit message."
899 edittext.append(_("HG: Enter commit message."
899 " Lines beginning with 'HG:' are removed."))
900 " Lines beginning with 'HG:' are removed."))
900 edittext.append("HG: --")
901 edittext.append("HG: --")
901 edittext.append("HG: user: %s" % user)
902 edittext.append("HG: user: %s" % user)
902 if p2 != nullid:
903 if p2 != nullid:
903 edittext.append("HG: branch merge")
904 edittext.append("HG: branch merge")
904 if branchname:
905 if branchname:
905 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
906 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
906 edittext.extend(["HG: added %s" % f for f in added])
907 edittext.extend(["HG: added %s" % f for f in added])
907 edittext.extend(["HG: changed %s" % f for f in updated])
908 edittext.extend(["HG: changed %s" % f for f in updated])
908 edittext.extend(["HG: removed %s" % f for f in removed])
909 edittext.extend(["HG: removed %s" % f for f in removed])
909 if not added and not updated and not removed:
910 if not added and not updated and not removed:
910 edittext.append("HG: no files changed")
911 edittext.append("HG: no files changed")
911 edittext.append("")
912 edittext.append("")
912 # run editor in the repository root
913 # run editor in the repository root
913 olddir = os.getcwd()
914 olddir = os.getcwd()
914 os.chdir(self.root)
915 os.chdir(self.root)
915 text = self.ui.edit("\n".join(edittext), user)
916 text = self.ui.edit("\n".join(edittext), user)
916 os.chdir(olddir)
917 os.chdir(olddir)
917
918
918 lines = [line.rstrip() for line in text.rstrip().splitlines()]
919 lines = [line.rstrip() for line in text.rstrip().splitlines()]
919 while lines and not lines[0]:
920 while lines and not lines[0]:
920 del lines[0]
921 del lines[0]
921 if not lines and use_dirstate:
922 if not lines and use_dirstate:
922 raise util.Abort(_("empty commit message"))
923 raise util.Abort(_("empty commit message"))
923 text = '\n'.join(lines)
924 text = '\n'.join(lines)
924
925
925 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
926 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
926 user, wctx.date(), extra)
927 user, wctx.date(), extra)
927 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
928 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
928 parent2=xp2)
929 parent2=xp2)
929 tr.close()
930 tr.close()
930
931
931 if self.branchcache:
932 if self.branchcache:
932 self.branchtags()
933 self.branchtags()
933
934
934 if use_dirstate or update_dirstate:
935 if use_dirstate or update_dirstate:
935 self.dirstate.setparents(n)
936 self.dirstate.setparents(n)
936 if use_dirstate:
937 if use_dirstate:
937 for f in removed:
938 for f in removed:
938 self.dirstate.forget(f)
939 self.dirstate.forget(f)
939 valid = 1 # our dirstate updates are complete
940 valid = 1 # our dirstate updates are complete
940
941
941 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
942 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
942 return n
943 return n
943 finally:
944 finally:
944 if not valid: # don't save our updated dirstate
945 if not valid: # don't save our updated dirstate
945 self.dirstate.invalidate()
946 self.dirstate.invalidate()
946 del tr
947 del tr
947
948
948 def walk(self, match, node=None):
949 def walk(self, match, node=None):
949 '''
950 '''
950 walk recursively through the directory tree or a given
951 walk recursively through the directory tree or a given
951 changeset, finding all files matched by the match
952 changeset, finding all files matched by the match
952 function
953 function
953 '''
954 '''
954 return self[node].walk(match)
955 return self[node].walk(match)
955
956
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

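    # The dirstate states tested above are single letters: 'n' normal
    # (tracked), 'a' added, 'r' marked removed, 'm' merged, '?' untracked.
    # A hypothetical call; add() returns the names it refused:
    #
    #   rejected = repo.add(['README', 'no-such-file'])
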
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in util.sort(heads)]

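    # The (-rev, node) pairs above are a decorate-sort-undecorate pass:
    # e.g. (invented revs) heads at [3, 7, 5] decorate to [(-3, a), (-7, b),
    # (-5, c)], sort to [(-7, b), (-5, c), (-3, a)], and the nodes come
    # back newest-first.
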
    def branchheads(self, branch=None, start=None):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

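    # A small worked example of the sweep above (revision numbers invented):
    # with 0 <- 1 <- 2 and 1 <- 3 all on one branch, the tip is 3, so
    # heads = [3] and ancestors = {1}.  Rev 2 is not in ancestors but is on
    # the branch, so heads becomes [3, 2].  Rev 1 is in ancestors and is
    # swapped for its parent 0, which is swept away in turn, leaving the
    # two true heads [3, 2].
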
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

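    # between() samples each top..bottom chain at exponentially growing
    # first-parent distances: i == f holds at steps 1, 2, 4, 8, ..., so a
    # chain of length k is summarized by O(log k) nodes.  findincoming()
    # below uses these samples to binary-search for the known/unknown
    # boundary.
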
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        search = [(t, b) for (t, b, p1, p2) in search]
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

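    # Discovery above runs in two phases: a breadth-first walk over remote
    # 'branches' (linear segments), batching branch queries ten at a time,
    # followed by a binary search via remote.between() over each segment
    # whose base is already known locally.  reqcnt counts the round trips
    # reported by the "total queries" debug message.
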
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads[p1] = True
                    if p2 in heads:
                        updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

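    # Note the asymmetric return type: with a heads list the caller gets a
    # (roots, updated_remote_heads) pair, without one just the roots list.
    # prepush() below passes remote heads and relies on the pair form.
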
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

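    # prepush() likewise has a two-shaped result: (changegroup, remote_heads)
    # when there is something to push, or (None, flag) with flag 1 for
    # "nothing to push" and 0 for "push refused".  Both push paths below
    # test ret[0] before unpacking.
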
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and a total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

1795 # Now that we have all theses utility functions to help out and
1796 # Now that we have all theses utility functions to help out and
1796 # logically divide up the task, generate the group.
1797 # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
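
        # Descriptive sketch (added note, not an authoritative spec): the
        # stream that gengroup() yields above is laid out as
        #
        #   changelog group chunks
        #   manifest group chunks
        #   for each changed file:
        #       chunkheader(len(fname)), fname, then that file's group chunks
        #   changegroup.closechunk()   -- empty chunk ending the stream
        #
        # which is the order addchangegroup() below consumes it in.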

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink
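
        # Added note: linkrev(n) gives the changelog revision that introduced
        # revlog entry n, so the closure built here maps a manifest or file
        # node back to its owning changelog node, e.g. (illustrative only):
        #
        #   lookup = lookuprevlink_func(self.manifest)
        #   clnode = lookup(manifestnode)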

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
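
        # Hypothetical caller sketch (names are illustrative, not from this
        # file): a sender that already knows the common nodes could do
        #
        #   cg = repo._changegroup(commonnodes, 'push')
        #   remote.addchangegroup(cg, 'push', repo.url())
        #
        # letting findmissing() above work out what the recipient lacks.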

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1 + added heads (2..n)
        - fewer heads than before: -1 - removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
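
    # The wire layout parsed by stream_in() above, as implied by its reads
    # (descriptive note, not a protocol specification):
    #
    #   <resp>\n                      -- 0 ok, 1 forbidden, 2 lock failed
    #   <total_files> <total_bytes>\n
    #   then per file:
    #   <store name>\0<size>\n        -- followed by exactly <size> data bytes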

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
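
    # Hedged usage sketch: a caller holding a repository object for the
    # remote side would request a streaming clone with something like
    #
    #   repo.clone(remote, stream=True)
    #
    # falling back to pull() when heads are given or the server lacks the
    # 'stream' capability.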

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
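
# Added note: this module's transaction() hands the closure built by
# aftertrans() to the transaction object, roughly (sketch only; the exact
# call site and arguments may differ):
#
#   tr = transaction.transaction(ui.warn, opener, journalname,
#                                aftertrans(renames))
#
# so the pending journal renames run on close without the transaction
# keeping a reference back to the repository.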

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True