node: replace nullid and friends with nodeconstants class [WIP]...
Joerg Sonnenberger
r47758:07b9ebea default draft
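The change applies one pattern throughout: module-level imports of nullid, nullhex, wdirid, wdirhex and wdirfilenodeids from mercurial.node are replaced by attributes on a nodeconstants class. Code with a repository, revlog or dirstate in scope reaches the constants through it (repo.nullid, repo.nodeconstants.wdirhex, rlog.nullid, dirstate._nodeconstants.nullid); code with no such object in scope imports sha1nodeconstants directly. A minimal sketch of the class, assuming 20-byte SHA-1 nodes (the real definitions live in mercurial/node.py and may differ in detail):

import binascii


class sha1nodeconstants(object):
    # a sketch only; attribute names mirror the hunks below, values assumed
    nodelen = 20

    nullid = b'\0' * nodelen            # formerly module-level nullid
    nullhex = binascii.hexlify(nullid)  # b'0' * 40, formerly nullhex

    wdirid = b'\xff' * nodelen          # working-directory pseudo-node
    wdirhex = binascii.hexlify(wdirid)  # formerly wdirhex

Routing lookups through the owning object keeps every call site hash-agnostic, so supporting a second node format (for example SHA-256) only requires another nodeconstants class, not another sweep over the tree.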
@@ -38,7 +38,6 b' import collections'
38 38 from mercurial.i18n import _
39 39 from mercurial.node import (
40 40 hex,
41 nullid,
42 41 short,
43 42 )
44 43 from mercurial import (
@@ -109,7 +108,7 b' class emptyfilecontext(object):'
109 108 return b''
110 109
111 110 def node(self):
112 return nullid
111 return self._repo.nullid
113 112
114 113
115 114 def uniq(lst):
@@ -927,7 +926,7 b' class fixupstate(object):'
927 926 the commit is a clone from ctx, with a (optionally) different p1, and
928 927 different file contents replaced by memworkingcopy.
929 928 """
930 parents = p1 and (p1, nullid)
929 parents = p1 and (p1, self.repo.nullid)
931 930 extra = ctx.extra()
932 931 if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
933 932 extra[b'absorb_source'] = ctx.hex()
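The parents = p1 and (p1, self.repo.nullid) line keeps absorb's short-circuit idiom: a falsy p1 passes through unchanged (no explicit parents), while a truthy p1 produces the explicit pair. In isolation, with a stand-in constant:

nullid = b'\0' * 20  # stand-in for self.repo.nullid


def parent_pair(p1):
    # falsy p1 (None or b'') passes through; truthy p1 yields an explicit pair
    return p1 and (p1, nullid)


assert parent_pair(None) is None
assert parent_pair(b'\x12' * 20) == (b'\x12' * 20, nullid)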
@@ -9,7 +9,7 b' from __future__ import absolute_import'
9 9 import os
10 10
11 11 from mercurial.i18n import _
12 from mercurial.node import nullhex
12 from mercurial.node import sha1nodeconstants
13 13 from mercurial import (
14 14 config,
15 15 error,
@@ -192,7 +192,7 b' class convert_git(common.converter_sourc'
192 192 return heads
193 193
194 194 def catfile(self, rev, ftype):
195 if rev == nullhex:
195 if rev == sha1nodeconstants.nullhex:
196 196 raise IOError
197 197 self.catfilepipe[0].write(rev + b'\n')
198 198 self.catfilepipe[0].flush()
@@ -214,7 +214,7 b' class convert_git(common.converter_sourc'
214 214 return data
215 215
216 216 def getfile(self, name, rev):
217 if rev == nullhex:
217 if rev == sha1nodeconstants.nullhex:
218 218 return None, None
219 219 if name == b'.hgsub':
220 220 data = b'\n'.join([m.hgsub() for m in self.submoditer()])
@@ -228,7 +228,7 b' class convert_git(common.converter_sourc'
228 228 return data, mode
229 229
230 230 def submoditer(self):
231 null = nullhex
231 null = sha1nodeconstants.nullhex
232 232 for m in sorted(self.submodules, key=lambda p: p.path):
233 233 if m.node != null:
234 234 yield m
@@ -317,7 +317,7 b' class convert_git(common.converter_sourc'
317 317 subexists[0] = True
318 318 if entry[4] == b'D' or renamesource:
319 319 subdeleted[0] = True
320 changes.append((b'.hgsub', nullhex))
320 changes.append((b'.hgsub', sha1nodeconstants.nullhex))
321 321 else:
322 322 changes.append((b'.hgsub', b''))
323 323 elif entry[1] == b'160000' or entry[0] == b':160000':
@@ -325,7 +325,7 b' class convert_git(common.converter_sourc'
325 325 subexists[0] = True
326 326 else:
327 327 if renamesource:
328 h = nullhex
328 h = sha1nodeconstants.nullhex
329 329 self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b""
330 330 changes.append((f, h))
331 331
@@ -362,7 +362,7 b' class convert_git(common.converter_sourc'
362 362
363 363 if subexists[0]:
364 364 if subdeleted[0]:
365 changes.append((b'.hgsubstate', nullhex))
365 changes.append((b'.hgsubstate', sha1nodeconstants.nullhex))
366 366 else:
367 367 self.retrievegitmodules(version)
368 368 changes.append((b'.hgsubstate', b''))
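convert's git source reads a foreign repository and traffics in 40-character hex identifiers, so the hunks above use sha1nodeconstants.nullhex directly rather than a binary repo.nullid. The catfile guard, restated against an assumed stand-in value:

nullhex = b'0' * 40  # hexlify of the null node; stands in for sha1nodeconstants.nullhex


def catfile_guard(rev):
    # the null revision names no git object, mirroring convert_git.catfile above
    if rev == nullhex:
        raise IOError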
@@ -27,8 +27,7 b' from mercurial.pycompat import open'
27 27 from mercurial.node import (
28 28 bin,
29 29 hex,
30 nullhex,
31 nullid,
30 sha1nodeconstants,
32 31 )
33 32 from mercurial import (
34 33 bookmarks,
@@ -160,7 +159,7 b' class mercurial_sink(common.converter_si'
160 159 continue
161 160 revid = revmap.get(source.lookuprev(s[0]))
162 161 if not revid:
163 if s[0] == nullhex:
162 if s[0] == sha1nodeconstants.nullhex:
164 163 revid = s[0]
165 164 else:
166 165 # missing, but keep for hash stability
@@ -179,7 +178,7 b' class mercurial_sink(common.converter_si'
179 178
180 179 revid = s[0]
181 180 subpath = s[1]
182 if revid != nullhex:
181 if revid != sha1nodeconstants.nullhex:
183 182 revmap = self.subrevmaps.get(subpath)
184 183 if revmap is None:
185 184 revmap = mapfile(
@@ -304,9 +303,9 b' class mercurial_sink(common.converter_si'
304 303 parent = parents[0]
305 304
306 305 if len(parents) < 2:
307 parents.append(nullid)
306 parents.append(self.repo.nullid)
308 307 if len(parents) < 2:
309 parents.append(nullid)
308 parents.append(self.repo.nullid)
310 309 p2 = parents.pop(0)
311 310
312 311 text = commit.desc
@@ -356,7 +355,7 b' class mercurial_sink(common.converter_si'
356 355 p2 = parents.pop(0)
357 356 p1ctx = self.repo[p1]
358 357 p2ctx = None
359 if p2 != nullid:
358 if p2 != self.repo.nullid:
360 359 p2ctx = self.repo[p2]
361 360 fileset = set(files)
362 361 if full:
@@ -421,7 +420,7 b' class mercurial_sink(common.converter_si'
421 420
422 421 def puttags(self, tags):
423 422 tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
424 tagparent = tagparent or nullid
423 tagparent = tagparent or self.repo.nullid
425 424
426 425 oldlines = set()
427 426 for branch, heads in pycompat.iteritems(self.repo.branchmap()):
@@ -4,7 +4,7 b' import contextlib'
4 4 import errno
5 5 import os
6 6
7 from mercurial.node import nullid
7 from mercurial.node import sha1nodeconstants
8 8 from mercurial import (
9 9 error,
10 10 extensions,
@@ -81,14 +81,16 b' class gitdirstate(object):'
81 81 except pygit2.GitError:
82 82 # Typically happens when peeling HEAD fails, as in an
83 83 # empty repository.
84 return nullid
84 return sha1nodeconstants.nullid
85 85
86 86 def p2(self):
87 87 # TODO: MERGE_HEAD? something like that, right?
88 return nullid
88 return sha1nodeconstants.nullid
89 89
90 def setparents(self, p1, p2=nullid):
91 assert p2 == nullid, b'TODO merging support'
90 def setparents(self, p1, p2=None):
91 if p2 is None:
92 p2 = sha1nodeconstants.nullid
93 assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
92 94 self.git.head.set_target(gitutil.togitnode(p1))
93 95
94 96 @util.propertycache
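Note the shape of the setparents change above: the old signature baked the constant into a default argument (p2=nullid), and defaults are evaluated once at function-definition time, before any repository exists to ask. The new code uses a None sentinel and resolves the constant per call. The same move in isolation, with illustrative names:

nullid = b'\0' * 20  # stand-in for sha1nodeconstants.nullid


class dirstatesketch(object):
    def setparents(self, p1, p2=None):
        # a default argument is bound once, when `def` runs, so a per-repo
        # constant cannot live there; resolve the sentinel at call time
        if p2 is None:
            p2 = nullid
        assert p2 == nullid, b'TODO merging support'
        self.p1 = p1

The identical sentinel replacement appears further down for bundlerepository.setparents and for branchcache(tipnode=None).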
@@ -102,7 +104,7 b' class gitdirstate(object):'
102 104
103 105 def parents(self):
104 106 # TODO how on earth do we find p2 if a merge is in flight?
105 return self.p1(), nullid
107 return self.p1(), sha1nodeconstants.nullid
106 108
107 109 def __iter__(self):
108 110 return (pycompat.fsencode(f.path) for f in self.git.index)
@@ -5,11 +5,8 b' from mercurial.i18n import _'
5 5 from mercurial.node import (
6 6 bin,
7 7 hex,
8 nullhex,
9 nullid,
10 8 nullrev,
11 9 sha1nodeconstants,
12 wdirhex,
13 10 )
14 11 from mercurial import (
15 12 ancestor,
@@ -47,7 +44,7 b' class baselog(object): # revlog.revlog)'
47 44 )
48 45
49 46 def rev(self, n):
50 if n == nullid:
47 if n == sha1nodeconstants.nullid:
51 48 return -1
52 49 t = self._db.execute(
53 50 'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
@@ -58,7 +55,7 b' class baselog(object): # revlog.revlog)'
58 55
59 56 def node(self, r):
60 57 if r == nullrev:
61 return nullid
58 return sha1nodeconstants.nullid
62 59 t = self._db.execute(
63 60 'SELECT node FROM changelog WHERE rev = ?', (r,)
64 61 ).fetchone()
@@ -134,7 +131,7 b' class changelog(baselog):'
134 131 bin(v[0]): v[1]
135 132 for v in self._db.execute('SELECT node, rev FROM changelog')
136 133 }
137 r[nullid] = nullrev
134 r[sha1nodeconstants.nullid] = nullrev
138 135 return r
139 136
140 137 def tip(self):
@@ -143,7 +140,7 b' class changelog(baselog):'
143 140 ).fetchone()
144 141 if t:
145 142 return bin(t[0])
146 return nullid
143 return sha1nodeconstants.nullid
147 144
148 145 def revs(self, start=0, stop=None):
149 146 if stop is None:
@@ -163,7 +160,7 b' class changelog(baselog):'
163 160 return next(t)
164 161
165 162 def _partialmatch(self, id):
166 if wdirhex.startswith(id):
163 if sha1nodeconstants.wdirhex.startswith(id):
167 164 raise error.WdirUnsupported
168 165 candidates = [
169 166 bin(x[0])
@@ -171,8 +168,8 b' class changelog(baselog):'
171 168 'SELECT node FROM changelog WHERE node LIKE ?', (id + b'%',)
172 169 )
173 170 ]
174 if nullhex.startswith(id):
175 candidates.append(nullid)
171 if sha1nodeconstants.nullhex.startswith(id):
172 candidates.append(sha1nodeconstants.nullid)
176 173 if len(candidates) > 1:
177 174 raise error.AmbiguousPrefixLookupError(
178 175 id, b'00changelog.i', _(b'ambiguous identifier')
@@ -217,8 +214,10 b' class changelog(baselog):'
217 214 else:
218 215 n = nodeorrev
219 216 # handle looking up nullid
220 if n == nullid:
221 return hgchangelog._changelogrevision(extra={}, manifest=nullid)
217 if n == sha1nodeconstants.nullid:
218 return hgchangelog._changelogrevision(
219 extra={}, manifest=sha1nodeconstants.nullid
220 )
222 221 hn = gitutil.togitnode(n)
223 222 # We've got a real commit!
224 223 files = [
@@ -234,7 +233,7 b' class changelog(baselog):'
234 233 for r in self._db.execute(
235 234 'SELECT filename FROM changedfiles '
236 235 'WHERE node = ? and filenode = ?',
237 (hn, nullhex),
236 (hn, sha1nodeconstants.nullhex),
238 237 )
239 238 ]
240 239 c = self.gitrepo[hn]
@@ -295,7 +294,7 b' class changelog(baselog):'
295 294 not supplied, uses all of the revlog's heads. If common is not
296 295 supplied, uses nullid."""
297 296 if common is None:
298 common = [nullid]
297 common = [sha1nodeconstants.nullid]
299 298 if heads is None:
300 299 heads = self.heads()
301 300
@@ -394,9 +393,9 b' class changelog(baselog):'
394 393 ):
395 394 parents = []
396 395 hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
397 if p1 != nullid:
396 if p1 != sha1nodeconstants.nullid:
398 397 parents.append(hp1)
399 if p2 and p2 != nullid:
398 if p2 and p2 != sha1nodeconstants.nullid:
400 399 parents.append(hp2)
401 400 assert date is not None
402 401 timestamp, tz = date
@@ -429,7 +428,7 b' class manifestlog(baselog):'
429 428 return self.get(b'', node)
430 429
431 430 def get(self, relpath, node):
432 if node == nullid:
431 if node == sha1nodeconstants.nullid:
433 432 # TODO: this should almost certainly be a memgittreemanifestctx
434 433 return manifest.memtreemanifestctx(self, relpath)
435 434 commit = self.gitrepo[gitutil.togitnode(node)]
@@ -448,9 +447,10 b' class filelog(baselog):'
448 447 super(filelog, self).__init__(gr, db)
449 448 assert isinstance(path, bytes)
450 449 self.path = path
450 self.nullid = sha1nodeconstants.nullid
451 451
452 452 def read(self, node):
453 if node == nullid:
453 if node == sha1nodeconstants.nullid:
454 454 return b''
455 455 return self.gitrepo[gitutil.togitnode(node)].data
456 456
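filelog growing a nullid attribute here is what lets later hunks ask the store itself (prevnode == revlog.nullid in shallowbundle, node == rlog.nullid in the lfs wrapper) instead of importing a global. A hedged sketch of that duck-typed contract, with assumed names:

NULLID = b'\0' * 20  # stand-in for sha1nodeconstants.nullid


class storesketch(object):
    """Anything revlog-like now advertises its own null node."""

    nullid = NULLID

    def parents(self, node):
        # a revision with no history: both parent slots hold the null node
        return self.nullid, self.nullid


def is_fulltext_base(store, prevnode):
    # the shallowbundle check below, restated against the duck-typed contract
    return prevnode == store.nullid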
@@ -1,7 +1,7 b''
1 1 """utilities to assist in working with pygit2"""
2 2 from __future__ import absolute_import
3 3
4 from mercurial.node import bin, hex, nullid
4 from mercurial.node import bin, hex, sha1nodeconstants
5 5
6 6 from mercurial import pycompat
7 7
@@ -50,4 +50,4 b' def fromgitnode(n):'
50 50 return bin(n)
51 51
52 52
53 nullgit = togitnode(nullid)
53 nullgit = togitnode(sha1nodeconstants.nullid)
@@ -5,10 +5,7 b' import os'
5 5 import sqlite3
6 6
7 7 from mercurial.i18n import _
8 from mercurial.node import (
9 nullhex,
10 nullid,
11 )
8 from mercurial.node import sha1nodeconstants
12 9
13 10 from mercurial import (
14 11 encoding,
@@ -281,7 +278,7 b' def _index_repo('
281 278 for pos, commit in enumerate(walker):
282 279 if prog is not None:
283 280 prog.update(pos)
284 p1 = p2 = nullhex
281 p1 = p2 = sha1nodeconstants.nullhex
285 282 if len(commit.parents) > 2:
286 283 raise error.ProgrammingError(
287 284 (
@@ -318,7 +315,9 b' def _index_repo('
318 315 )
319 316 new_files = (p.delta.new_file for p in patchgen)
320 317 files = {
321 nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid
318 nf.path: nf.id.hex
319 for nf in new_files
320 if nf.id.raw != sha1nodeconstants.nullid
322 321 }
323 322 for p, n in files.items():
324 323 # We intentionally set NULLs for any file parentage
@@ -14,7 +14,6 b' from mercurial.i18n import _'
14 14 from mercurial.node import (
15 15 bin,
16 16 hex,
17 nullid,
18 17 short,
19 18 )
20 19 from mercurial import (
@@ -314,7 +313,9 b' def _dosign(ui, repo, *revs, **opts):'
314 313 if revs:
315 314 nodes = [repo.lookup(n) for n in revs]
316 315 else:
317 nodes = [node for node in repo.dirstate.parents() if node != nullid]
316 nodes = [
317 node for node in repo.dirstate.parents() if node != repo.nullid
318 ]
318 319 if len(nodes) > 1:
319 320 raise error.Abort(
320 321 _(b'uncommitted merge - please provide a specific revision')
@@ -40,7 +40,6 b' import os'
40 40
41 41 from mercurial.i18n import _
42 42 from mercurial.node import (
43 nullid,
44 43 nullrev,
45 44 short,
46 45 )
@@ -95,7 +94,7 b' def difftree(ui, repo, node1=None, node2'
95 94 mmap2 = repo[node2].manifest()
96 95 m = scmutil.match(repo[node1], files)
97 96 st = repo.status(node1, node2, m)
98 empty = short(nullid)
97 empty = short(repo.nullid)
99 98
100 99 for f in st.modified:
101 100 # TODO get file permissions
@@ -317,9 +316,9 b' def revtree(ui, args, repo, full=b"tree"'
317 316 parentstr = b""
318 317 if parents:
319 318 pp = repo.changelog.parents(n)
320 if pp[0] != nullid:
319 if pp[0] != repo.nullid:
321 320 parentstr += b" " + short(pp[0])
322 if pp[1] != nullid:
321 if pp[1] != repo.nullid:
323 322 parentstr += b" " + short(pp[1])
324 323 if not full:
325 324 ui.write(b"%s%s\n" % (short(n), parentstr))
@@ -22,7 +22,6 b' from mercurial.i18n import _'
22 22 from mercurial.node import (
23 23 bin,
24 24 hex,
25 nullid,
26 25 )
27 26
28 27 from mercurial import (
@@ -117,8 +116,8 b' def recorddirstateparents(dirstate, old,'
117 116 new = list(new)
118 117 if util.safehasattr(dirstate, 'journalstorage'):
119 118 # only record two hashes if there was a merge
120 oldhashes = old[:1] if old[1] == nullid else old
121 newhashes = new[:1] if new[1] == nullid else new
119 oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old
120 newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new
122 121 dirstate.journalstorage.record(
123 122 wdirparenttype, b'.', oldhashes, newhashes
124 123 )
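journal's dirstate hook has no repository in hand, so the constants travel on the dirstate as _nodeconstants. Under that assumption, the keep-one-hash-unless-merging rule above can be phrased without any import:

class nodeconstantssketch(object):
    nullid = b'\0' * 20  # assumed value


def hashes_to_record(parents, nodeconstants):
    # keep both hashes only for a merge; otherwise drop the null second
    # parent, matching recorddirstateparents above
    if parents[1] == nodeconstants.nullid:
        return parents[:1]
    return parents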
@@ -131,7 +130,7 b' def recordbookmarks(orig, store, fp):'
131 130 if util.safehasattr(repo, 'journal'):
132 131 oldmarks = bookmarks.bmstore(repo)
133 132 for mark, value in pycompat.iteritems(store):
134 oldvalue = oldmarks.get(mark, nullid)
133 oldvalue = oldmarks.get(mark, repo.nullid)
135 134 if value != oldvalue:
136 135 repo.journal.record(bookmarktype, mark, oldvalue, value)
137 136 return orig(store, fp)
@@ -11,7 +11,8 b' from __future__ import absolute_import'
11 11
12 12 from mercurial.i18n import _
13 13
14 from mercurial import node, util
14 from mercurial.node import short
15 from mercurial import util
15 16 from mercurial.utils import (
16 17 urlutil,
17 18 )
@@ -137,7 +138,7 b' class basestore(object):'
137 138 filestocheck = [] # list of (cset, filename, expectedhash)
138 139 for rev in revs:
139 140 cctx = self.repo[rev]
140 cset = b"%d:%s" % (cctx.rev(), node.short(cctx.node()))
141 cset = b"%d:%s" % (cctx.rev(), short(cctx.node()))
141 142
142 143 for standin in cctx:
143 144 filename = lfutil.splitstandin(standin)
@@ -17,7 +17,6 b' from mercurial.i18n import _'
17 17 from mercurial.node import (
18 18 bin,
19 19 hex,
20 nullid,
21 20 )
22 21
23 22 from mercurial import (
@@ -115,7 +114,7 b' def lfconvert(ui, src, dest, *pats, **op'
115 114 rsrc[ctx]
116 115 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
117 116 )
118 revmap = {nullid: nullid}
117 revmap = {rsrc.nullid: rdst.nullid}
119 118 if tolfile:
120 119 # Lock destination to prevent modification while it is converted to.
121 120 # Don't need to lock src because we are just reading from its
@@ -340,7 +339,7 b' def _commitcontext(rdst, parents, ctx, d'
340 339 # Generate list of changed files
341 340 def _getchangedfiles(ctx, parents):
342 341 files = set(ctx.files())
343 if nullid not in parents:
342 if ctx.repo().nullid not in parents:
344 343 mc = ctx.manifest()
345 344 for pctx in ctx.parents():
346 345 for fn in pctx.manifest().diff(mc):
@@ -354,7 +353,7 b' def _convertparents(ctx, revmap):'
354 353 for p in ctx.parents():
355 354 parents.append(revmap[p.node()])
356 355 while len(parents) < 2:
357 parents.append(nullid)
356 parents.append(ctx.repo().nullid)
358 357 return parents
359 358
360 359
@@ -15,10 +15,7 b' import os'
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 from mercurial.node import (
19 hex,
20 nullid,
21 )
18 from mercurial.node import hex
22 19 from mercurial.pycompat import open
23 20
24 21 from mercurial import (
@@ -613,7 +610,7 b' def getlfilestoupload(repo, missing, add'
613 610 ) as progress:
614 611 for i, n in enumerate(missing):
615 612 progress.update(i)
616 parents = [p for p in repo[n].parents() if p != nullid]
613 parents = [p for p in repo[n].parents() if p != repo.nullid]
617 614
618 615 with lfstatus(repo, value=False):
619 616 ctx = repo[n]
@@ -10,7 +10,7 b' from __future__ import absolute_import'
10 10 import hashlib
11 11
12 12 from mercurial.i18n import _
13 from mercurial.node import bin, hex, nullid, short
13 from mercurial.node import bin, hex, short
14 14 from mercurial.pycompat import (
15 15 getattr,
16 16 setattr,
@@ -158,7 +158,7 b' def _islfs(rlog, node=None, rev=None):'
158 158 rev = rlog.rev(node)
159 159 else:
160 160 node = rlog.node(rev)
161 if node == nullid:
161 if node == rlog.nullid:
162 162 return False
163 163 flags = rlog.flags(rev)
164 164 return bool(flags & revlog.REVIDX_EXTSTORED)
@@ -73,7 +73,6 b' from mercurial.i18n import _'
73 73 from mercurial.node import (
74 74 bin,
75 75 hex,
76 nullid,
77 76 nullrev,
78 77 short,
79 78 )
@@ -908,13 +907,13 b' class queue(object):'
908 907 """
909 908 if rev is None:
910 909 (p1, p2) = repo.dirstate.parents()
911 if p2 == nullid:
910 if p2 == repo.nullid:
912 911 return p1
913 912 if not self.applied:
914 913 return None
915 914 return self.applied[-1].node
916 915 p1, p2 = repo.changelog.parents(rev)
917 if p2 != nullid and p2 in [x.node for x in self.applied]:
916 if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
918 917 return p2
919 918 return p1
920 919
@@ -1591,7 +1590,7 b' class queue(object):'
1591 1590 for hs in repo.branchmap().iterheads():
1592 1591 heads.extend(hs)
1593 1592 if not heads:
1594 heads = [nullid]
1593 heads = [repo.nullid]
1595 1594 if repo.dirstate.p1() not in heads and not exact:
1596 1595 self.ui.status(_(b"(working directory not at a head)\n"))
1597 1596
@@ -1857,7 +1856,7 b' class queue(object):'
1857 1856 fctx = ctx[f]
1858 1857 repo.wwrite(f, fctx.data(), fctx.flags())
1859 1858 repo.dirstate.normal(f)
1860 repo.setparents(qp, nullid)
1859 repo.setparents(qp, repo.nullid)
1861 1860 for patch in reversed(self.applied[start:end]):
1862 1861 self.ui.status(_(b"popping %s\n") % patch.name)
1863 1862 del self.applied[start:end]
@@ -11,7 +11,6 b' import errno'
11 11 import struct
12 12
13 13 from mercurial.i18n import _
14 from mercurial.node import nullid
15 14 from mercurial import (
16 15 bundle2,
17 16 changegroup,
@@ -94,7 +93,7 b' def generateellipsesbundle2('
94 93 raise error.Abort(_(b'depth must be positive, got %d') % depth)
95 94
96 95 heads = set(heads or repo.heads())
97 common = set(common or [nullid])
96 common = set(common or [repo.nullid])
98 97
99 98 visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
100 99 repo, common, heads, set(), match, depth=depth
@@ -128,7 +127,7 b' def generate_ellipses_bundle2_for_wideni'
128 127 common,
129 128 known,
130 129 ):
131 common = set(common or [nullid])
130 common = set(common or [repo.nullid])
132 131 # Steps:
133 132 # 1. Send kill for "$known & ::common"
134 133 #
@@ -12,7 +12,6 b' import os'
12 12 from mercurial.i18n import _
13 13 from mercurial.node import (
14 14 hex,
15 nullid,
16 15 short,
17 16 )
18 17 from mercurial import (
@@ -193,7 +192,7 b' def pullbundle2extraprepare(orig, pullop'
193 192 kwargs[b'known'] = [
194 193 hex(ctx.node())
195 194 for ctx in repo.set(b'::%ln', pullop.common)
196 if ctx.node() != nullid
195 if ctx.node() != repo.nullid
197 196 ]
198 197 if not kwargs[b'known']:
199 198 # Mercurial serializes an empty list as '' and deserializes it as
@@ -370,7 +369,7 b' def _widen('
370 369 ds = repo.dirstate
371 370 p1, p2 = ds.p1(), ds.p2()
372 371 with ds.parentchange():
373 ds.setparents(nullid, nullid)
372 ds.setparents(repo.nullid, repo.nullid)
374 373 if isoldellipses:
375 374 with wrappedextraprepare:
376 375 exchange.pull(repo, remote, heads=common)
@@ -380,7 +379,7 b' def _widen('
380 379 known = [
381 380 ctx.node()
382 381 for ctx in repo.set(b'::%ln', common)
383 if ctx.node() != nullid
382 if ctx.node() != repo.nullid
384 383 ]
385 384 with remote.commandexecutor() as e:
386 385 bundle = e.callcommand(
@@ -69,7 +69,7 b' import operator'
69 69 import re
70 70 import time
71 71
72 from mercurial.node import bin, nullid, short
72 from mercurial.node import bin, short
73 73 from mercurial.i18n import _
74 74 from mercurial.pycompat import getattr
75 75 from mercurial.thirdparty import attr
@@ -586,7 +586,7 b' def getoldnodedrevmap(repo, nodelist):'
586 586 tags.tag(
587 587 repo,
588 588 tagname,
589 nullid,
589 repo.nullid,
590 590 message=None,
591 591 user=None,
592 592 date=None,
@@ -1606,7 +1606,7 b' def phabsend(ui, repo, *revs, **opts):'
1606 1606 tags.tag(
1607 1607 repo,
1608 1608 tagname,
1609 nullid,
1609 repo.nullid,
1610 1610 message=None,
1611 1611 user=None,
1612 1612 date=None,
@@ -2,7 +2,10 b' from __future__ import absolute_import'
2 2
3 3 import threading
4 4
5 from mercurial.node import hex, nullid
5 from mercurial.node import (
6 hex,
7 sha1nodeconstants,
8 )
6 9 from mercurial.pycompat import getattr
7 10 from mercurial import (
8 11 mdiff,
@@ -55,7 +58,7 b' class unioncontentstore(basestore.baseun'
55 58 """
56 59 chain = self.getdeltachain(name, node)
57 60
58 if chain[-1][ChainIndicies.BASENODE] != nullid:
61 if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
59 62 # If we didn't receive a full chain, throw
60 63 raise KeyError((name, hex(node)))
61 64
@@ -92,7 +95,7 b' class unioncontentstore(basestore.baseun'
92 95 deltabasenode.
93 96 """
94 97 chain = self._getpartialchain(name, node)
95 while chain[-1][ChainIndicies.BASENODE] != nullid:
98 while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
96 99 x, x, deltabasename, deltabasenode, x = chain[-1]
97 100 try:
98 101 morechain = self._getpartialchain(deltabasename, deltabasenode)
@@ -187,7 +190,12 b' class remotefilelogcontentstore(basestor'
187 190 # Since remotefilelog content stores only contain full texts, just
188 191 # return that.
189 192 revision = self.get(name, node)
190 return revision, name, nullid, self.getmeta(name, node)
193 return (
194 revision,
195 name,
196 sha1nodeconstants.nullid,
197 self.getmeta(name, node),
198 )
191 199
192 200 def getdeltachain(self, name, node):
193 201 # Since remotefilelog content stores just contain full texts, we return
@@ -195,7 +203,7 b' class remotefilelogcontentstore(basestor'
195 203 # The nullid in the deltabasenode slot indicates that the revision is a
196 204 # fulltext.
197 205 revision = self.get(name, node)
198 return [(name, node, None, nullid, revision)]
206 return [(name, node, None, sha1nodeconstants.nullid, revision)]
199 207
200 208 def getmeta(self, name, node):
201 209 self._sanitizemetacache()
@@ -237,7 +245,12 b' class remotecontentstore(object):'
237 245
238 246 def getdelta(self, name, node):
239 247 revision = self.get(name, node)
240 return revision, name, nullid, self._shared.getmeta(name, node)
248 return (
249 revision,
250 name,
251 sha1nodeconstants.nullid,
252 self._shared.getmeta(name, node),
253 )
241 254
242 255 def getdeltachain(self, name, node):
243 256 # Since our remote content stores just contain full texts, we return a
@@ -245,7 +258,7 b' class remotecontentstore(object):'
245 258 # The nullid in the deltabasenode slot indicates that the revision is a
246 259 # fulltext.
247 260 revision = self.get(name, node)
248 return [(name, node, None, nullid, revision)]
261 return [(name, node, None, sha1nodeconstants.nullid, revision)]
249 262
250 263 def getmeta(self, name, node):
251 264 self._fileservice.prefetch(
@@ -276,11 +289,11 b' class manifestrevlogstore(object):'
276 289
277 290 def getdelta(self, name, node):
278 291 revision = self.get(name, node)
279 return revision, name, nullid, self.getmeta(name, node)
292 return revision, name, self._cl.nullid, self.getmeta(name, node)
280 293
281 294 def getdeltachain(self, name, node):
282 295 revision = self.get(name, node)
283 return [(name, node, None, nullid, revision)]
296 return [(name, node, None, self._cl.nullid, revision)]
284 297
285 298 def getmeta(self, name, node):
286 299 rl = self._revlog(name)
@@ -304,9 +317,9 b' class manifestrevlogstore(object):'
304 317 missing.discard(ancnode)
305 318
306 319 p1, p2 = rl.parents(ancnode)
307 if p1 != nullid and p1 not in known:
320 if p1 != self._cl.nullid and p1 not in known:
308 321 missing.add(p1)
309 if p2 != nullid and p2 not in known:
322 if p2 != self._cl.nullid and p2 not in known:
310 323 missing.add(p2)
311 324
312 325 linknode = self._cl.node(rl.linkrev(ancrev))
@@ -3,7 +3,10 b' from __future__ import absolute_import'
3 3 import struct
4 4 import zlib
5 5
6 from mercurial.node import hex, nullid
6 from mercurial.node import (
7 hex,
8 sha1nodeconstants,
9 )
7 10 from mercurial.i18n import _
8 11 from mercurial import (
9 12 pycompat,
@@ -458,7 +461,7 b' class mutabledatapack(basepack.mutableba'
458 461 rawindex = b''
459 462 fmt = self.INDEXFORMAT
460 463 for node, deltabase, offset, size in entries:
461 if deltabase == nullid:
464 if deltabase == sha1nodeconstants.nullid:
462 465 deltabaselocation = FULLTEXTINDEXMARK
463 466 else:
464 467 # Instead of storing the deltabase node in the index, let's
@@ -12,7 +12,7 b' import zlib'
12 12 from mercurial.node import (
13 13 bin,
14 14 hex,
15 nullid,
15 sha1nodeconstants,
16 16 short,
17 17 )
18 18 from mercurial.i18n import _
@@ -57,9 +57,9 b' def debugremotefilelog(ui, path, **opts)'
57 57 _(b"%s => %s %s %s %s\n")
58 58 % (short(node), short(p1), short(p2), short(linknode), copyfrom)
59 59 )
60 if p1 != nullid:
60 if p1 != sha1nodeconstants.nullid:
61 61 queue.append(p1)
62 if p2 != nullid:
62 if p2 != sha1nodeconstants.nullid:
63 63 queue.append(p2)
64 64
65 65
@@ -152,7 +152,7 b' def debugindex(orig, ui, repo, file_=Non'
152 152 try:
153 153 pp = r.parents(node)
154 154 except Exception:
155 pp = [nullid, nullid]
155 pp = [repo.nullid, repo.nullid]
156 156 ui.write(
157 157 b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
158 158 % (
@@ -197,7 +197,7 b' def debugindexdot(orig, ui, repo, file_)'
197 197 node = r.node(i)
198 198 pp = r.parents(node)
199 199 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
200 if pp[1] != nullid:
200 if pp[1] != repo.nullid:
201 201 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
202 202 ui.write(b"}\n")
203 203
@@ -212,7 +212,7 b' def verifyremotefilelog(ui, path, **opts'
212 212 filepath = os.path.join(root, file)
213 213 size, firstnode, mapping = parsefileblob(filepath, decompress)
214 214 for p1, p2, linknode, copyfrom in pycompat.itervalues(mapping):
215 if linknode == nullid:
215 if linknode == sha1nodeconstants.nullid:
216 216 actualpath = os.path.relpath(root, path)
217 217 key = fileserverclient.getcachekey(
218 218 b"reponame", actualpath, file
@@ -371,7 +371,7 b' def _sanitycheck(ui, nodes, bases):'
371 371 current = node
372 372 deltabase = bases[current]
373 373
374 while deltabase != nullid:
374 while deltabase != sha1nodeconstants.nullid:
375 375 if deltabase not in nodes:
376 376 ui.warn(
377 377 (
@@ -397,7 +397,7 b' def _sanitycheck(ui, nodes, bases):'
397 397 deltabase = bases[current]
398 398 # Since ``node`` begins a valid chain, reset/memoize its base to nullid
399 399 # so we don't traverse it again.
400 bases[node] = nullid
400 bases[node] = sha1nodeconstants.nullid
401 401 return failures
402 402
403 403
@@ -14,7 +14,7 b' import time'
14 14 import zlib
15 15
16 16 from mercurial.i18n import _
17 from mercurial.node import bin, hex, nullid
17 from mercurial.node import bin, hex
18 18 from mercurial import (
19 19 error,
20 20 pycompat,
@@ -599,9 +599,13 b' class fileserverclient(object):'
599 599
600 600 # partition missing nodes into nullid and not-nullid so we can
601 601 # warn about this filtering potentially shadowing bugs.
602 nullids = len([None for unused, id in missingids if id == nullid])
602 nullids = len(
603 [None for unused, id in missingids if id == self.repo.nullid]
604 )
603 605 if nullids:
604 missingids = [(f, id) for f, id in missingids if id != nullid]
606 missingids = [
607 (f, id) for f, id in missingids if id != self.repo.nullid
608 ]
605 609 repo.ui.develwarn(
606 610 (
607 611 b'remotefilelog not fetching %d null revs'
@@ -2,7 +2,10 b' from __future__ import absolute_import'
2 2
3 3 import struct
4 4
5 from mercurial.node import hex, nullid
5 from mercurial.node import (
6 hex,
7 sha1nodeconstants,
8 )
6 9 from mercurial import (
7 10 pycompat,
8 11 util,
@@ -147,9 +150,9 b' class historypack(basepack.basepack):'
147 150 pending.remove(ancnode)
148 151 p1node = entry[ANC_P1NODE]
149 152 p2node = entry[ANC_P2NODE]
150 if p1node != nullid and p1node not in known:
153 if p1node != sha1nodeconstants.nullid and p1node not in known:
151 154 pending.add(p1node)
152 if p2node != nullid and p2node not in known:
155 if p2node != sha1nodeconstants.nullid and p2node not in known:
153 156 pending.add(p2node)
154 157
155 158 yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom)
@@ -457,9 +460,9 b' class mutablehistorypack(basepack.mutabl'
457 460 def parentfunc(node):
458 461 x, p1, p2, x, x, x = entrymap[node]
459 462 parents = []
460 if p1 != nullid:
463 if p1 != sha1nodeconstants.nullid:
461 464 parents.append(p1)
462 if p2 != nullid:
465 if p2 != sha1nodeconstants.nullid:
463 466 parents.append(p2)
464 467 return parents
465 468
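parentfunc above shows the convention the remotefilelog stores share: ancestry records always carry two parent slots, padded with the null node, and graph code strips the padding before walking. The same filter recurs in unionmetadatastore and repacker; in isolation:

nullid = b'\0' * 20  # stand-in for sha1nodeconstants.nullid


def real_parents(p1, p2):
    # drop null placeholders so ancestor walks only see actual edges
    parents = []
    if p1 != nullid:
        parents.append(p1)
    if p2 != nullid:
        parents.append(p2)
    return parents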
@@ -1,6 +1,9 b''
1 1 from __future__ import absolute_import
2 2
3 from mercurial.node import hex, nullid
3 from mercurial.node import (
4 hex,
5 sha1nodeconstants,
6 )
4 7 from . import (
5 8 basestore,
6 9 shallowutil,
@@ -51,9 +54,9 b' class unionmetadatastore(basestore.baseu'
51 54 missing.append((name, node))
52 55 continue
53 56 p1, p2, linknode, copyfrom = value
54 if p1 != nullid and p1 not in known:
57 if p1 != sha1nodeconstants.nullid and p1 not in known:
55 58 queue.append((copyfrom or curname, p1))
56 if p2 != nullid and p2 not in known:
59 if p2 != sha1nodeconstants.nullid and p2 not in known:
57 60 queue.append((curname, p2))
58 61 return missing
59 62
@@ -9,7 +9,7 b' from __future__ import absolute_import'
9 9 import collections
10 10 import time
11 11
12 from mercurial.node import bin, hex, nullid, nullrev
12 from mercurial.node import bin, hex, nullrev
13 13 from mercurial import (
14 14 ancestor,
15 15 context,
@@ -35,7 +35,7 b' class remotefilectx(context.filectx):'
35 35 ancestormap=None,
36 36 ):
37 37 if fileid == nullrev:
38 fileid = nullid
38 fileid = repo.nullid
39 39 if fileid and len(fileid) == 40:
40 40 fileid = bin(fileid)
41 41 super(remotefilectx, self).__init__(
@@ -78,7 +78,7 b' class remotefilectx(context.filectx):'
78 78
79 79 @propertycache
80 80 def _linkrev(self):
81 if self._filenode == nullid:
81 if self._filenode == self._repo.nullid:
82 82 return nullrev
83 83
84 84 ancestormap = self.ancestormap()
@@ -174,7 +174,7 b' class remotefilectx(context.filectx):'
174 174
175 175 p1, p2, linknode, copyfrom = ancestormap[self._filenode]
176 176 results = []
177 if p1 != nullid:
177 if p1 != repo.nullid:
178 178 path = copyfrom or self._path
179 179 flog = repo.file(path)
180 180 p1ctx = remotefilectx(
@@ -183,7 +183,7 b' class remotefilectx(context.filectx):'
183 183 p1ctx._descendantrev = self.rev()
184 184 results.append(p1ctx)
185 185
186 if p2 != nullid:
186 if p2 != repo.nullid:
187 187 path = self._path
188 188 flog = repo.file(path)
189 189 p2ctx = remotefilectx(
@@ -504,25 +504,25 b' class remoteworkingfilectx(context.worki'
504 504 if renamed:
505 505 p1 = renamed
506 506 else:
507 p1 = (path, pcl[0]._manifest.get(path, nullid))
507 p1 = (path, pcl[0]._manifest.get(path, self._repo.nullid))
508 508
509 p2 = (path, nullid)
509 p2 = (path, self._repo.nullid)
510 510 if len(pcl) > 1:
511 p2 = (path, pcl[1]._manifest.get(path, nullid))
511 p2 = (path, pcl[1]._manifest.get(path, self._repo.nullid))
512 512
513 513 m = {}
514 if p1[1] != nullid:
514 if p1[1] != self._repo.nullid:
515 515 p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
516 516 m.update(p1ctx.filelog().ancestormap(p1[1]))
517 517
518 if p2[1] != nullid:
518 if p2[1] != self._repo.nullid:
519 519 p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
520 520 m.update(p2ctx.filelog().ancestormap(p2[1]))
521 521
522 522 copyfrom = b''
523 523 if renamed:
524 524 copyfrom = renamed[0]
525 m[None] = (p1[1], p2[1], nullid, copyfrom)
525 m[None] = (p1[1], p2[1], self._repo.nullid, copyfrom)
526 526 self._ancestormap = m
527 527
528 528 return self._ancestormap
@@ -10,12 +10,7 b' from __future__ import absolute_import'
10 10 import collections
11 11 import os
12 12
13 from mercurial.node import (
14 bin,
15 nullid,
16 wdirfilenodeids,
17 wdirid,
18 )
13 from mercurial.node import bin
19 14 from mercurial.i18n import _
20 15 from mercurial import (
21 16 ancestor,
@@ -100,7 +95,7 b' class remotefilelog(object):'
100 95
101 96 pancestors = {}
102 97 queue = []
103 if realp1 != nullid:
98 if realp1 != self.repo.nullid:
104 99 p1flog = self
105 100 if copyfrom:
106 101 p1flog = remotefilelog(self.opener, copyfrom, self.repo)
@@ -108,7 +103,7 b' class remotefilelog(object):'
108 103 pancestors.update(p1flog.ancestormap(realp1))
109 104 queue.append(realp1)
110 105 visited.add(realp1)
111 if p2 != nullid:
106 if p2 != self.repo.nullid:
112 107 pancestors.update(self.ancestormap(p2))
113 108 queue.append(p2)
114 109 visited.add(p2)
@@ -129,10 +124,10 b' class remotefilelog(object):'
129 124 pacopyfrom,
130 125 )
131 126
132 if pa1 != nullid and pa1 not in visited:
127 if pa1 != self.repo.nullid and pa1 not in visited:
133 128 queue.append(pa1)
134 129 visited.add(pa1)
135 if pa2 != nullid and pa2 not in visited:
130 if pa2 != self.repo.nullid and pa2 not in visited:
136 131 queue.append(pa2)
137 132 visited.add(pa2)
138 133
@@ -238,7 +233,7 b' class remotefilelog(object):'
238 233 returns True if text is different than what is stored.
239 234 """
240 235
241 if node == nullid:
236 if node == self.repo.nullid:
242 237 return True
243 238
244 239 nodetext = self.read(node)
@@ -275,13 +270,13 b' class remotefilelog(object):'
275 270 return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
276 271
277 272 def parents(self, node):
278 if node == nullid:
279 return nullid, nullid
273 if node == self.repo.nullid:
274 return self.repo.nullid, self.repo.nullid
280 275
281 276 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
282 277 p1, p2, linknode, copyfrom = ancestormap[node]
283 278 if copyfrom:
284 p1 = nullid
279 p1 = self.repo.nullid
285 280
286 281 return p1, p2
287 282
@@ -317,8 +312,8 b' class remotefilelog(object):'
317 312 if prevnode is None:
318 313 basenode = prevnode = p1
319 314 if basenode == node:
320 basenode = nullid
321 if basenode != nullid:
315 basenode = self.repo.nullid
316 if basenode != self.repo.nullid:
322 317 revision = None
323 318 delta = self.revdiff(basenode, node)
324 319 else:
@@ -380,13 +375,16 b' class remotefilelog(object):'
380 375 this is generally only used for bundling and communicating with vanilla
381 376 hg clients.
382 377 """
383 if node == nullid:
378 if node == self.repo.nullid:
384 379 return b""
385 380 if len(node) != 20:
386 381 raise error.LookupError(
387 382 node, self.filename, _(b'invalid revision input')
388 383 )
389 if node == wdirid or node in wdirfilenodeids:
384 if (
385 node == self.repo.nodeconstants.wdirid
386 or node in self.repo.nodeconstants.wdirfilenodeids
387 ):
390 388 raise error.WdirUnsupported
391 389
392 390 store = self.repo.contentstore
@@ -432,8 +430,8 b' class remotefilelog(object):'
432 430 return self.repo.metadatastore.getancestors(self.filename, node)
433 431
434 432 def ancestor(self, a, b):
435 if a == nullid or b == nullid:
436 return nullid
433 if a == self.repo.nullid or b == self.repo.nullid:
434 return self.repo.nullid
437 435
438 436 revmap, parentfunc = self._buildrevgraph(a, b)
439 437 nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -442,13 +440,13 b' class remotefilelog(object):'
442 440 if ancs:
443 441 # choose a consistent winner when there's a tie
444 442 return min(map(nodemap.__getitem__, ancs))
445 return nullid
443 return self.repo.nullid
446 444
447 445 def commonancestorsheads(self, a, b):
448 446 """calculate all the heads of the common ancestors of nodes a and b"""
449 447
450 if a == nullid or b == nullid:
451 return nullid
448 if a == self.repo.nullid or b == self.repo.nullid:
449 return self.repo.nullid
452 450
453 451 revmap, parentfunc = self._buildrevgraph(a, b)
454 452 nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -472,10 +470,10 b' class remotefilelog(object):'
472 470 p1, p2, linknode, copyfrom = pdata
473 471 # Don't follow renames (copyfrom).
474 472 # remotefilectx.ancestor does that.
475 if p1 != nullid and not copyfrom:
473 if p1 != self.repo.nullid and not copyfrom:
476 474 parents.append(p1)
477 475 allparents.add(p1)
478 if p2 != nullid:
476 if p2 != self.repo.nullid:
479 477 parents.append(p2)
480 478 allparents.add(p2)
481 479
@@ -13,7 +13,7 b' import time'
13 13 import zlib
14 14
15 15 from mercurial.i18n import _
16 from mercurial.node import bin, hex, nullid
16 from mercurial.node import bin, hex
17 17 from mercurial.pycompat import open
18 18 from mercurial import (
19 19 changegroup,
@@ -242,7 +242,7 b' def _loadfileblob(repo, cachepath, path,'
242 242 filecachepath = os.path.join(cachepath, path, hex(node))
243 243 if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
244 244 filectx = repo.filectx(path, fileid=node)
245 if filectx.node() == nullid:
245 if filectx.node() == repo.nullid:
246 246 repo.changelog = changelog.changelog(repo.svfs)
247 247 filectx = repo.filectx(path, fileid=node)
248 248
@@ -284,7 +284,7 b' def getflogheads(repo, proto, path):'
284 284 """A server api for requesting a filelog's heads"""
285 285 flog = repo.file(path)
286 286 heads = flog.heads()
287 return b'\n'.join((hex(head) for head in heads if head != nullid))
287 return b'\n'.join((hex(head) for head in heads if head != repo.nullid))
288 288
289 289
290 290 def getfile(repo, proto, file, node):
@@ -302,7 +302,7 b' def getfile(repo, proto, file, node):'
302 302 if not cachepath:
303 303 cachepath = os.path.join(repo.path, b"remotefilelogcache")
304 304 node = bin(node.strip())
305 if node == nullid:
305 if node == repo.nullid:
306 306 return b'0\0'
307 307 return b'0\0' + _loadfileblob(repo, cachepath, file, node)
308 308
@@ -327,7 +327,7 b' def getfiles(repo, proto):'
327 327 break
328 328
329 329 node = bin(request[:40])
330 if node == nullid:
330 if node == repo.nullid:
331 331 yield b'0\n'
332 332 continue
333 333
@@ -380,8 +380,8 b' def createfileblob(filectx):'
380 380 ancestortext = b""
381 381 for ancestorctx in ancestors:
382 382 parents = ancestorctx.parents()
383 p1 = nullid
384 p2 = nullid
383 p1 = repo.nullid
384 p2 = repo.nullid
385 385 if len(parents) > 0:
386 386 p1 = parents[0].filenode()
387 387 if len(parents) > 1:
@@ -4,10 +4,7 b' import os'
4 4 import time
5 5
6 6 from mercurial.i18n import _
7 from mercurial.node import (
8 nullid,
9 short,
10 )
7 from mercurial.node import short
11 8 from mercurial import (
12 9 encoding,
13 10 error,
@@ -586,7 +583,7 b' class repacker(object):'
586 583 # Create one contiguous chain and reassign deltabases.
587 584 for i, node in enumerate(orphans):
588 585 if i == 0:
589 deltabases[node] = (nullid, 0)
586 deltabases[node] = (self.repo.nullid, 0)
590 587 else:
591 588 parent = orphans[i - 1]
592 589 deltabases[node] = (parent, deltabases[parent][1] + 1)
@@ -676,8 +673,8 b' class repacker(object):'
676 673 # of immediate child
677 674 deltatuple = deltabases.get(node, None)
678 675 if deltatuple is None:
679 deltabase, chainlen = nullid, 0
680 deltabases[node] = (nullid, 0)
676 deltabase, chainlen = self.repo.nullid, 0
677 deltabases[node] = (self.repo.nullid, 0)
681 678 nobase.add(node)
682 679 else:
683 680 deltabase, chainlen = deltatuple
@@ -692,7 +689,7 b' class repacker(object):'
692 689 # file was copied from elsewhere. So don't attempt to do any
693 690 # deltas with the other file.
694 691 if copyfrom:
695 p1 = nullid
692 p1 = self.repo.nullid
696 693
697 694 if chainlen < maxchainlen:
698 695 # Record this child as the delta base for its parents.
@@ -700,9 +697,9 b' class repacker(object):'
700 697 # many children, and this will only choose the last one.
701 698 # TODO: record all children and try all deltas to find
702 699 # best
703 if p1 != nullid:
700 if p1 != self.repo.nullid:
704 701 deltabases[p1] = (node, chainlen + 1)
705 if p2 != nullid:
702 if p2 != self.repo.nullid:
706 703 deltabases[p2] = (node, chainlen + 1)
707 704
708 705 # experimental config: repack.chainorphansbysize
@@ -719,7 +716,7 b' class repacker(object):'
719 716 # TODO: Optimize the deltachain fetching. Since we're
720 717 # iterating over the different version of the file, we may
721 718 # be fetching the same deltachain over and over again.
722 if deltabase != nullid:
719 if deltabase != self.repo.nullid:
723 720 deltaentry = self.data.getdelta(filename, node)
724 721 delta, deltabasename, origdeltabase, meta = deltaentry
725 722 size = meta.get(constants.METAKEYSIZE)
@@ -791,9 +788,9 b' class repacker(object):'
791 788 # If copyfrom == filename, it means the copy history
792 789 # went to come other file, then came back to this one, so we
793 790 # should continue processing it.
794 if p1 != nullid and copyfrom != filename:
791 if p1 != self.repo.nullid and copyfrom != filename:
795 792 dontprocess.add(p1)
796 if p2 != nullid:
793 if p2 != self.repo.nullid:
797 794 dontprocess.add(p2)
798 795 continue
799 796
@@ -814,9 +811,9 b' class repacker(object):'
814 811 def parentfunc(node):
815 812 p1, p2, linknode, copyfrom = ancestors[node]
816 813 parents = []
817 if p1 != nullid:
814 if p1 != self.repo.nullid:
818 815 parents.append(p1)
819 if p2 != nullid:
816 if p2 != self.repo.nullid:
820 817 parents.append(p2)
821 818 return parents
822 819
@@ -7,7 +7,7 b''
7 7 from __future__ import absolute_import
8 8
9 9 from mercurial.i18n import _
10 from mercurial.node import bin, hex, nullid
10 from mercurial.node import bin, hex
11 11 from mercurial import (
12 12 bundlerepo,
13 13 changegroup,
@@ -143,7 +143,7 b' class shallowcg1packer(changegroup.cgpac'
143 143
144 144 def nodechunk(self, revlog, node, prevnode, linknode):
145 145 prefix = b''
146 if prevnode == nullid:
146 if prevnode == revlog.nullid:
147 147 delta = revlog.rawdata(node)
148 148 prefix = mdiff.trivialdiffheader(len(delta))
149 149 else:
@@ -245,7 +245,7 b' def addchangegroupfiles('
245 245 processed = set()
246 246
247 247 def available(f, node, depf, depnode):
248 if depnode != nullid and (depf, depnode) not in processed:
248 if depnode != repo.nullid and (depf, depnode) not in processed:
249 249 if not (depf, depnode) in revisiondatas:
250 250 # It's not in the changegroup, assume it's already
251 251 # in the repo
@@ -267,7 +267,7 b' def addchangegroupfiles('
267 267 dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
268 268
269 269 for dependent in dependents:
270 if dependent == nullid or (f, dependent) in revisiondatas:
270 if dependent == repo.nullid or (f, dependent) in revisiondatas:
271 271 continue
272 272 prefetchfiles.append((f, hex(dependent)))
273 273
@@ -306,7 +306,7 b' def addchangegroupfiles('
306 306 continue
307 307
308 308 for p in [p1, p2]:
309 if p != nullid:
309 if p != repo.nullid:
310 310 if not available(f, node, f, p):
311 311 continue
312 312
@@ -9,7 +9,7 b' from __future__ import absolute_import'
9 9 import os
10 10
11 11 from mercurial.i18n import _
12 from mercurial.node import hex, nullid, nullrev
12 from mercurial.node import hex, nullrev
13 13 from mercurial import (
14 14 encoding,
15 15 error,
@@ -206,8 +206,8 b' def wraprepo(repo):'
206 206 m1 = ctx.p1().manifest()
207 207 files = []
208 208 for f in ctx.modified() + ctx.added():
209 fparent1 = m1.get(f, nullid)
210 if fparent1 != nullid:
209 fparent1 = m1.get(f, self.nullid)
210 if fparent1 != self.nullid:
211 211 files.append((f, hex(fparent1)))
212 212 self.fileservice.prefetch(files)
213 213 return super(shallowrepository, self).commitctx(
@@ -52,7 +52,6 b' import zlib'
52 52
53 53 from mercurial.i18n import _
54 54 from mercurial.node import (
55 nullid,
56 55 nullrev,
57 56 sha1nodeconstants,
58 57 short,
@@ -366,12 +365,12 b' class sqlitefilestore(object):'
366 365 )
367 366
368 367 if p1rev == nullrev:
369 p1node = nullid
368 p1node = sha1nodeconstants.nullid
370 369 else:
371 370 p1node = self._revtonode[p1rev]
372 371
373 372 if p2rev == nullrev:
374 p2node = nullid
373 p2node = sha1nodeconstants.nullid
375 374 else:
376 375 p2node = self._revtonode[p2rev]
377 376
@@ -400,7 +399,7 b' class sqlitefilestore(object):'
400 399 return iter(pycompat.xrange(len(self._revisions)))
401 400
402 401 def hasnode(self, node):
403 if node == nullid:
402 if node == sha1nodeconstants.nullid:
404 403 return False
405 404
406 405 return node in self._nodetorev
@@ -411,8 +410,8 b' class sqlitefilestore(object):'
411 410 )
412 411
413 412 def parents(self, node):
414 if node == nullid:
415 return nullid, nullid
413 if node == sha1nodeconstants.nullid:
414 return sha1nodeconstants.nullid, sha1nodeconstants.nullid
416 415
417 416 if node not in self._revisions:
418 417 raise error.LookupError(node, self._path, _(b'no node'))
@@ -431,7 +430,7 b' class sqlitefilestore(object):'
431 430 return entry.p1rev, entry.p2rev
432 431
433 432 def rev(self, node):
434 if node == nullid:
433 if node == sha1nodeconstants.nullid:
435 434 return nullrev
436 435
437 436 if node not in self._nodetorev:
@@ -441,7 +440,7 b' class sqlitefilestore(object):'
441 440
442 441 def node(self, rev):
443 442 if rev == nullrev:
444 return nullid
443 return sha1nodeconstants.nullid
445 444
446 445 if rev not in self._revtonode:
447 446 raise IndexError(rev)
@@ -485,7 +484,7 b' class sqlitefilestore(object):'
485 484 def heads(self, start=None, stop=None):
486 485 if start is None and stop is None:
487 486 if not len(self):
488 return [nullid]
487 return [sha1nodeconstants.nullid]
489 488
490 489 startrev = self.rev(start) if start is not None else nullrev
491 490 stoprevs = {self.rev(n) for n in stop or []}
@@ -529,7 +528,7 b' class sqlitefilestore(object):'
529 528 return len(self.revision(node))
530 529
531 530 def revision(self, node, raw=False, _verifyhash=True):
532 if node in (nullid, nullrev):
531 if node in (sha1nodeconstants.nullid, nullrev):
533 532 return b''
534 533
535 534 if isinstance(node, int):
@@ -596,7 +595,7 b' class sqlitefilestore(object):'
596 595 b'unhandled value for nodesorder: %s' % nodesorder
597 596 )
598 597
599 nodes = [n for n in nodes if n != nullid]
598 nodes = [n for n in nodes if n != sha1nodeconstants.nullid]
600 599
601 600 if not nodes:
602 601 return
@@ -705,12 +704,12 b' class sqlitefilestore(object):'
705 704 raise SQLiteStoreError(b'unhandled revision flag')
706 705
707 706 if maybemissingparents:
708 if p1 != nullid and not self.hasnode(p1):
709 p1 = nullid
707 if p1 != sha1nodeconstants.nullid and not self.hasnode(p1):
708 p1 = sha1nodeconstants.nullid
710 709 storeflags |= FLAG_MISSING_P1
711 710
712 if p2 != nullid and not self.hasnode(p2):
713 p2 = nullid
711 if p2 != sha1nodeconstants.nullid and not self.hasnode(p2):
712 p2 = sha1nodeconstants.nullid
714 713 storeflags |= FLAG_MISSING_P2
715 714
716 715 baserev = self.rev(deltabase)
@@ -736,7 +735,10 b' class sqlitefilestore(object):'
736 735 # Possibly reset parents to make them proper.
737 736 entry = self._revisions[node]
738 737
739 if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
738 if (
739 entry.flags & FLAG_MISSING_P1
740 and p1 != sha1nodeconstants.nullid
741 ):
740 742 entry.p1node = p1
741 743 entry.p1rev = self._nodetorev[p1]
742 744 entry.flags &= ~FLAG_MISSING_P1
@@ -746,7 +748,10 b' class sqlitefilestore(object):'
746 748 (self._nodetorev[p1], entry.flags, entry.rid),
747 749 )
748 750
749 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
751 if (
752 entry.flags & FLAG_MISSING_P2
753 and p2 != sha1nodeconstants.nullid
754 ):
750 755 entry.p2node = p2
751 756 entry.p2rev = self._nodetorev[p2]
752 757 entry.flags &= ~FLAG_MISSING_P2
@@ -761,7 +766,7 b' class sqlitefilestore(object):'
761 766 empty = False
762 767 continue
763 768
764 if deltabase == nullid:
769 if deltabase == sha1nodeconstants.nullid:
765 770 text = mdiff.patch(b'', delta)
766 771 storedelta = None
767 772 else:
@@ -1012,7 +1017,7 b' class sqlitefilestore(object):'
1012 1017 assert revisiondata is not None
1013 1018 deltabase = p1
1014 1019
1015 if deltabase == nullid:
1020 if deltabase == sha1nodeconstants.nullid:
1016 1021 delta = revisiondata
1017 1022 else:
1018 1023 delta = mdiff.textdiff(
@@ -1021,7 +1026,7 b' class sqlitefilestore(object):'
1021 1026
1022 1027 # File index stores a pointer to its delta and the parent delta.
1023 1028 # The parent delta is stored via a pointer to the fileindex PK.
1024 if deltabase == nullid:
1029 if deltabase == sha1nodeconstants.nullid:
1025 1030 baseid = None
1026 1031 else:
1027 1032 baseid = self._revisions[deltabase].rid
@@ -1055,12 +1060,12 b' class sqlitefilestore(object):'
1055 1060
1056 1061 rev = len(self)
1057 1062
1058 if p1 == nullid:
1063 if p1 == sha1nodeconstants.nullid:
1059 1064 p1rev = nullrev
1060 1065 else:
1061 1066 p1rev = self._nodetorev[p1]
1062 1067
1063 if p2 == nullid:
1068 if p2 == sha1nodeconstants.nullid:
1064 1069 p2rev = nullrev
1065 1070 else:
1066 1071 p2rev = self._nodetorev[p2]
@@ -22,7 +22,6 b' from mercurial.pycompat import open'
22 22 from mercurial.node import (
23 23 bin,
24 24 hex,
25 nullid,
26 25 short,
27 26 )
28 27 from mercurial import (
@@ -134,6 +133,7 b' class transplants(object):'
134 133 class transplanter(object):
135 134 def __init__(self, ui, repo, opts):
136 135 self.ui = ui
136 self.repo = repo
137 137 self.path = repo.vfs.join(b'transplant')
138 138 self.opener = vfsmod.vfs(self.path)
139 139 self.transplants = transplants(
@@ -221,7 +221,7 b' class transplanter(object):'
221 221 exchange.pull(repo, source.peer(), heads=[node])
222 222
223 223 skipmerge = False
224 if parents[1] != nullid:
224 if parents[1] != repo.nullid:
225 225 if not opts.get(b'parent'):
226 226 self.ui.note(
227 227 _(b'skipping merge changeset %d:%s\n')
@@ -516,7 +516,7 b' class transplanter(object):'
516 516 def parselog(self, fp):
517 517 parents = []
518 518 message = []
519 node = nullid
519 node = self.repo.nullid
520 520 inmsg = False
521 521 user = None
522 522 date = None
@@ -568,7 +568,7 b' class transplanter(object):'
568 568 def matchfn(node):
569 569 if self.applied(repo, node, root):
570 570 return False
571 if source.changelog.parents(node)[1] != nullid:
571 if source.changelog.parents(node)[1] != repo.nullid:
572 572 return False
573 573 extra = source.changelog.read(node)[5]
574 574 cnode = extra.get(b'transplant_source')
@@ -804,7 +804,7 b' def _dotransplant(ui, repo, *revs, **opt'
804 804 tp = transplanter(ui, repo, opts)
805 805
806 806 p1 = repo.dirstate.p1()
807 if len(repo) > 0 and p1 == nullid:
807 if len(repo) > 0 and p1 == repo.nullid:
808 808 raise error.Abort(_(b'no revision checked out'))
809 809 if opts.get(b'continue'):
810 810 if not tp.canresume():
@@ -20,7 +20,6 b' added and removed in the working directo'
20 20 from __future__ import absolute_import
21 21
22 22 from mercurial.i18n import _
23 from mercurial.node import nullid
24 23
25 24 from mercurial import (
26 25 cmdutil,
@@ -113,7 +112,7 b' def _commitfiltered('
113 112
114 113 new = context.memctx(
115 114 repo,
116 parents=[base.node(), nullid],
115 parents=[base.node(), repo.nullid],
117 116 text=message,
118 117 files=files,
119 118 filectxfn=filectxfn,
@@ -15,7 +15,6 b' from .node import ('
15 15 bin,
16 16 hex,
17 17 short,
18 wdirid,
19 18 )
20 19 from .pycompat import getattr
21 20 from . import (
@@ -642,7 +641,7 b' def binaryencode(repo, bookmarks):'
642 641 binarydata = []
643 642 for book, node in bookmarks:
644 643 if not node: # None or ''
645 node = wdirid
644 node = repo.nodeconstants.wdirid
646 645 binarydata.append(_binaryentry.pack(node, len(book)))
647 646 binarydata.append(book)
648 647 return b''.join(binarydata)
@@ -674,7 +673,7 b' def binarydecode(repo, stream):'
674 673 if len(bookmark) < length:
675 674 if entry:
676 675 raise error.Abort(_(b'bad bookmark stream'))
677 if node == wdirid:
676 if node == repo.nodeconstants.wdirid:
678 677 node = None
679 678 books.append((bookmark, node))
680 679 return books
@@ -12,7 +12,6 b' import struct'
12 12 from .node import (
13 13 bin,
14 14 hex,
15 nullid,
16 15 nullrev,
17 16 )
18 17 from . import (
@@ -189,7 +188,7 b' class branchcache(object):'
189 188 self,
190 189 repo,
191 190 entries=(),
192 tipnode=nullid,
191 tipnode=None,
193 192 tiprev=nullrev,
194 193 filteredhash=None,
195 194 closednodes=None,
@@ -200,7 +199,10 b' class branchcache(object):'
200 199 has a given node or not. If it's not provided, we assume that every node
201 200 we have exists in changelog"""
202 201 self._repo = repo
203 self.tipnode = tipnode
202 if tipnode is None:
203 self.tipnode = repo.nullid
204 else:
205 self.tipnode = tipnode
204 206 self.tiprev = tiprev
205 207 self.filteredhash = filteredhash
206 208 # closednodes is a set of nodes that close their branch. If the branch
@@ -536,7 +538,7 b' class branchcache(object):'
536 538
537 539 if not self.validfor(repo):
538 540 # cache key are not valid anymore
539 self.tipnode = nullid
541 self.tipnode = repo.nullid
540 542 self.tiprev = nullrev
541 543 for heads in self.iterheads():
542 544 tiprev = max(cl.rev(node) for node in heads)
@@ -158,7 +158,6 b' import sys'
158 158 from .i18n import _
159 159 from .node import (
160 160 hex,
161 nullid,
162 161 short,
163 162 )
164 163 from . import (
@@ -2576,7 +2575,7 b' def widen_bundle('
2576 2575 fullnodes=commonnodes,
2577 2576 )
2578 2577 cgdata = packer.generate(
2579 {nullid},
2578 {repo.nullid},
2580 2579 list(commonnodes),
2581 2580 False,
2582 2581 b'narrow_widen',
@@ -19,7 +19,6 b' import shutil'
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 nullid,
23 22 nullrev,
24 23 )
25 24
@@ -447,7 +446,9 b' class bundlerepository(object):'
447 446 return encoding.getcwd() # always outside the repo
448 447
449 448 # Check if parents exist in localrepo before setting
450 def setparents(self, p1, p2=nullid):
449 def setparents(self, p1, p2=None):
450 if p2 is None:
451 p2 = self.nullid
451 452 p1rev = self.changelog.rev(p1)
452 453 p2rev = self.changelog.rev(p2)
453 454 msg = _(b"setting parent to node %s that only exists in the bundle\n")
@@ -15,7 +15,6 b' import weakref'
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 nullid,
19 18 nullrev,
20 19 short,
21 20 )
@@ -673,7 +672,7 b' def _revisiondeltatochunks(repo, delta, '
673 672
674 673 if delta.delta is not None:
675 674 prefix, data = b'', delta.delta
676 elif delta.basenode == nullid:
675 elif delta.basenode == repo.nullid:
677 676 data = delta.revision
678 677 prefix = mdiff.trivialdiffheader(len(data))
679 678 else:
@@ -11,7 +11,6 b' from .i18n import _'
11 11 from .node import (
12 12 bin,
13 13 hex,
14 nullid,
15 14 )
16 15 from .thirdparty import attr
17 16
@@ -221,7 +220,7 b' class changelogrevision(object):'
221 220
222 221 def __new__(cls, cl, text, sidedata, cpsd):
223 222 if not text:
224 return _changelogrevision(extra=_defaultextra, manifest=nullid)
223 return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
225 224
226 225 self = super(changelogrevision, cls).__new__(cls)
227 226 # We could return here and implement the following as an __init__.
@@ -15,7 +15,6 b' import re'
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 nullid,
19 18 nullrev,
20 19 short,
21 20 )
@@ -1097,7 +1096,7 b' def bailifchanged(repo, merge=True, hint'
1097 1096 'hint' is the usual hint given to Abort exception.
1098 1097 """
1099 1098
1100 if merge and repo.dirstate.p2() != nullid:
1099 if merge and repo.dirstate.p2() != repo.nullid:
1101 1100 raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)
1102 1101 st = repo.status()
1103 1102 if st.modified or st.added or st.removed or st.deleted:
@@ -2104,7 +2103,7 b' def _exportsingle(repo, ctx, fm, match, '
2104 2103 if parents:
2105 2104 prev = parents[0]
2106 2105 else:
2107 prev = nullid
2106 prev = repo.nullid
2108 2107
2109 2108 fm.context(ctx=ctx)
2110 2109 fm.plain(b'# HG changeset patch\n')
@@ -2967,7 +2966,7 b' def amend(ui, repo, old, extra, pats, op'
2967 2966 ms.reset()
2968 2967
2969 2968 # Reroute the working copy parent to the new changeset
2970 repo.setparents(newid, nullid)
2969 repo.setparents(newid, repo.nullid)
2971 2970
2972 2971 # Fixing the dirstate because localrepo.commitctx does not update
2973 2972 # it. This is rather convenient because we did not need to update
@@ -3322,7 +3321,7 b' def revert(ui, repo, ctx, *pats, **opts)'
3322 3321
3323 3322 # in case of merge, files that are actually added can be reported as
3324 3323 # modified, we need to post process the result
3325 if p2 != nullid:
3324 if p2 != repo.nullid:
3326 3325 mergeadd = set(dsmodified)
3327 3326 for path in dsmodified:
3328 3327 if path in mf:
@@ -3593,7 +3592,7 b' def _performrevert('
3593 3592 # We're reverting to our parent. If possible, we'd like status
3594 3593 # to report the file as clean. We have to use normallookup for
3595 3594 # merges to avoid losing information about merged/dirty files.
3596 if p2 != nullid:
3595 if p2 != repo.nullid:
3597 3596 normal = repo.dirstate.normallookup
3598 3597 else:
3599 3598 normal = repo.dirstate.normal
@@ -3690,7 +3689,7 b' def _performrevert('
3690 3689 repo.dirstate.add(f)
3691 3690
3692 3691 normal = repo.dirstate.normallookup
3693 if node == parent and p2 == nullid:
3692 if node == parent and p2 == repo.nullid:
3694 3693 normal = repo.dirstate.normal
3695 3694 for f in actions[b'undelete'][0]:
3696 3695 if interactive:
@@ -15,10 +15,8 b' import sys'
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 nullid,
19 18 nullrev,
20 19 short,
21 wdirhex,
22 20 wdirrev,
23 21 )
24 22 from .pycompat import open
@@ -486,7 +484,7 b' def annotate(ui, repo, *pats, **opts):'
486 484 return b'%d ' % rev
487 485
488 486 def formathex(h):
489 if h == wdirhex:
487 if h == repo.nodeconstants.wdirhex:
490 488 return b'%s+' % shorthex(hex(ctx.p1().node()))
491 489 else:
492 490 return b'%s ' % shorthex(h)
@@ -809,9 +807,9 b' def _dobackout(ui, repo, node=None, rev='
809 807 )
810 808
811 809 p1, p2 = repo.changelog.parents(node)
812 if p1 == nullid:
810 if p1 == repo.nullid:
813 811 raise error.InputError(_(b'cannot backout a change with no parents'))
814 if p2 != nullid:
812 if p2 != repo.nullid:
815 813 if not opts.get(b'parent'):
816 814 raise error.InputError(_(b'cannot backout a merge changeset'))
817 815 p = repo.lookup(opts[b'parent'])
@@ -1085,7 +1083,7 b' def bisect('
1085 1083 )
1086 1084 else:
1087 1085 node, p2 = repo.dirstate.parents()
1088 if p2 != nullid:
1086 if p2 != repo.nullid:
1089 1087 raise error.StateError(_(b'current bisect revision is a merge'))
1090 1088 if rev:
1091 1089 if not nodes:
@@ -4847,7 +4845,7 b' def merge(ui, repo, node=None, **opts):'
4847 4845
4848 4846 opts = pycompat.byteskwargs(opts)
4849 4847 abort = opts.get(b'abort')
4850 if abort and repo.dirstate.p2() == nullid:
4848 if abort and repo.dirstate.p2() == repo.nullid:
4851 4849 cmdutil.wrongtooltocontinue(repo, _(b'merge'))
4852 4850 cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
4853 4851 if abort:
@@ -5072,7 +5070,7 b' def parents(ui, repo, file_=None, **opts'
5072 5070
5073 5071 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
5074 5072 for n in p:
5075 if n != nullid:
5073 if n != repo.nullid:
5076 5074 displayer.show(repo[n])
5077 5075 displayer.close()
5078 5076
@@ -6105,7 +6103,7 b' def resolve(ui, repo, *pats, **opts):'
6105 6103 with repo.wlock():
6106 6104 ms = mergestatemod.mergestate.read(repo)
6107 6105
6108 if not (ms.active() or repo.dirstate.p2() != nullid):
6106 if not (ms.active() or repo.dirstate.p2() != repo.nullid):
6109 6107 raise error.StateError(
6110 6108 _(b'resolve command not applicable when not merging')
6111 6109 )
@@ -6223,7 +6221,7 b' def resolve(ui, repo, *pats, **opts):'
6223 6221 raise
6224 6222
6225 6223 ms.commit()
6226 branchmerge = repo.dirstate.p2() != nullid
6224 branchmerge = repo.dirstate.p2() != repo.nullid
6227 6225 mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
6228 6226
6229 6227 if not didwork and pats:
@@ -6315,7 +6313,7 b' def revert(ui, repo, *pats, **opts):'
6315 6313 opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
6316 6314
6317 6315 parent, p2 = repo.dirstate.parents()
6318 if not opts.get(b'rev') and p2 != nullid:
6316 if not opts.get(b'rev') and p2 != repo.nullid:
6319 6317 # revert after merge is a trap for new users (issue2915)
6320 6318 raise error.InputError(
6321 6319 _(b'uncommitted merge with no revision specified'),
@@ -6335,7 +6333,7 b' def revert(ui, repo, *pats, **opts):'
6335 6333 or opts.get(b'interactive')
6336 6334 ):
6337 6335 msg = _(b"no files or directories specified")
6338 if p2 != nullid:
6336 if p2 != repo.nullid:
6339 6337 hint = _(
6340 6338 b"uncommitted merge, use --all to discard all changes,"
6341 6339 b" or 'hg update -C .' to abort the merge"
@@ -7396,7 +7394,7 b' def tag(ui, repo, name1, *names, **opts)'
7396 7394 for n in names:
7397 7395 if repo.tagtype(n) == b'global':
7398 7396 alltags = tagsmod.findglobaltags(ui, repo)
7399 if alltags[n][0] == nullid:
7397 if alltags[n][0] == repo.nullid:
7400 7398 raise error.InputError(
7401 7399 _(b"tag '%s' is already removed") % n
7402 7400 )
@@ -7423,7 +7421,7 b' def tag(ui, repo, name1, *names, **opts)'
7423 7421 )
7424 7422 if not opts.get(b'local'):
7425 7423 p1, p2 = repo.dirstate.parents()
7426 if p2 != nullid:
7424 if p2 != repo.nullid:
7427 7425 raise error.StateError(_(b'uncommitted merge'))
7428 7426 bheads = repo.branchheads()
7429 7427 if not opts.get(b'force') and bheads and p1 not in bheads:
@@ -10,7 +10,6 b' import errno'
10 10 from .i18n import _
11 11 from .node import (
12 12 hex,
13 nullid,
14 13 nullrev,
15 14 )
16 15
@@ -277,10 +276,10 b' def _filecommit('
277 276 """
278 277
279 278 fname = fctx.path()
280 fparent1 = manifest1.get(fname, nullid)
281 fparent2 = manifest2.get(fname, nullid)
279 fparent1 = manifest1.get(fname, repo.nullid)
280 fparent2 = manifest2.get(fname, repo.nullid)
282 281 touched = None
283 if fparent1 == fparent2 == nullid:
282 if fparent1 == fparent2 == repo.nullid:
284 283 touched = 'added'
285 284
286 285 if isinstance(fctx, context.filectx):
@@ -291,9 +290,11 b' def _filecommit('
291 290 if node in [fparent1, fparent2]:
292 291 repo.ui.debug(b'reusing %s filelog entry\n' % fname)
293 292 if (
294 fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
293 fparent1 != repo.nullid
294 and manifest1.flags(fname) != fctx.flags()
295 295 ) or (
296 fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
296 fparent2 != repo.nullid
297 and manifest2.flags(fname) != fctx.flags()
297 298 ):
298 299 touched = 'modified'
299 300 return node, touched
@@ -327,7 +328,9 b' def _filecommit('
327 328 newfparent = fparent2
328 329
329 330 if manifest2: # branch merge
330 if fparent2 == nullid or cnode is None: # copied on remote side
331 if (
332 fparent2 == repo.nullid or cnode is None
333 ): # copied on remote side
331 334 if cfname in manifest2:
332 335 cnode = manifest2[cfname]
333 336 newfparent = fparent1
@@ -346,7 +349,7 b' def _filecommit('
346 349 if includecopymeta:
347 350 meta[b"copy"] = cfname
348 351 meta[b"copyrev"] = hex(cnode)
349 fparent1, fparent2 = nullid, newfparent
352 fparent1, fparent2 = repo.nullid, newfparent
350 353 else:
351 354 repo.ui.warn(
352 355 _(
@@ -356,20 +359,20 b' def _filecommit('
356 359 % (fname, cfname)
357 360 )
358 361
359 elif fparent1 == nullid:
360 fparent1, fparent2 = fparent2, nullid
361 elif fparent2 != nullid:
362 elif fparent1 == repo.nullid:
363 fparent1, fparent2 = fparent2, repo.nullid
364 elif fparent2 != repo.nullid:
362 365 if ms.active() and ms.extras(fname).get(b'filenode-source') == b'other':
363 fparent1, fparent2 = fparent2, nullid
366 fparent1, fparent2 = fparent2, repo.nullid
364 367 elif ms.active() and ms.extras(fname).get(b'merged') != b'yes':
365 fparent1, fparent2 = fparent1, nullid
368 fparent1, fparent2 = fparent1, repo.nullid
366 369 # is one parent an ancestor of the other?
367 370 else:
368 371 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
369 372 if fparent1 in fparentancestors:
370 fparent1, fparent2 = fparent2, nullid
373 fparent1, fparent2 = fparent2, repo.nullid
371 374 elif fparent2 in fparentancestors:
372 fparent2 = nullid
375 fparent2 = repo.nullid
373 376
374 377 force_new_node = False
375 378 # The file might have been deleted by merge code and user explicitly choose
@@ -384,9 +387,14 b' def _filecommit('
384 387 force_new_node = True
385 388 # is the file changed?
386 389 text = fctx.data()
387 if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
390 if (
391 fparent2 != repo.nullid
392 or meta
393 or flog.cmp(fparent1, text)
394 or force_new_node
395 ):
388 396 if touched is None: # do not overwrite added
389 if fparent2 == nullid:
397 if fparent2 == repo.nullid:
390 398 touched = 'modified'
391 399 else:
392 400 touched = 'merged'
@@ -14,14 +14,9 b' import stat'
14 14
15 15 from .i18n import _
16 16 from .node import (
17 addednodeid,
18 17 hex,
19 modifiednodeid,
20 nullid,
21 18 nullrev,
22 19 short,
23 wdirfilenodeids,
24 wdirhex,
25 20 )
26 21 from .pycompat import (
27 22 getattr,
@@ -140,7 +135,7 b' class basectx(object):'
140 135 removed.append(fn)
141 136 elif flag1 != flag2:
142 137 modified.append(fn)
143 elif node2 not in wdirfilenodeids:
138 elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
144 139 # When comparing files between two commits, we save time by
145 140 # not comparing the file contents when the nodeids differ.
146 141 # Note that this means we incorrectly report a reverted change
@@ -737,7 +732,7 b' class changectx(basectx):'
737 732 n2 = c2._parents[0]._node
738 733 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
739 734 if not cahs:
740 anc = nullid
735 anc = self._repo.nodeconstants.nullid
741 736 elif len(cahs) == 1:
742 737 anc = cahs[0]
743 738 else:
@@ -1132,7 +1127,11 b' class basefilectx(object):'
1132 1127 _path = self._path
1133 1128 fl = self._filelog
1134 1129 parents = self._filelog.parents(self._filenode)
1135 pl = [(_path, node, fl) for node in parents if node != nullid]
1130 pl = [
1131 (_path, node, fl)
1132 for node in parents
1133 if node != self._repo.nodeconstants.nullid
1134 ]
1136 1135
1137 1136 r = fl.renamed(self._filenode)
1138 1137 if r:
@@ -1556,12 +1555,12 b' class workingctx(committablectx):'
1556 1555 return self._repo.dirstate[key] not in b"?r"
1557 1556
1558 1557 def hex(self):
1559 return wdirhex
1558 return self._repo.nodeconstants.wdirhex
1560 1559
1561 1560 @propertycache
1562 1561 def _parents(self):
1563 1562 p = self._repo.dirstate.parents()
1564 if p[1] == nullid:
1563 if p[1] == self._repo.nodeconstants.nullid:
1565 1564 p = p[:-1]
1566 1565 # use unfiltered repo to delay/avoid loading obsmarkers
1567 1566 unfi = self._repo.unfiltered()
@@ -1572,7 +1571,9 b' class workingctx(committablectx):'
1572 1571 for n in p
1573 1572 ]
1574 1573
1575 def setparents(self, p1node, p2node=nullid):
1574 def setparents(self, p1node, p2node=None):
1575 if p2node is None:
1576 p2node = self._repo.nodeconstants.nullid
1576 1577 dirstate = self._repo.dirstate
1577 1578 with dirstate.parentchange():
1578 1579 copies = dirstate.setparents(p1node, p2node)
@@ -1584,7 +1585,7 b' class workingctx(committablectx):'
1584 1585 for f in copies:
1585 1586 if f not in pctx and copies[f] in pctx:
1586 1587 dirstate.copy(copies[f], f)
1587 if p2node == nullid:
1588 if p2node == self._repo.nodeconstants.nullid:
1588 1589 for f, s in sorted(dirstate.copies().items()):
1589 1590 if f not in pctx and s not in pctx:
1590 1591 dirstate.copy(None, f)
@@ -1944,8 +1945,8 b' class workingctx(committablectx):'
1944 1945
1945 1946 ff = self._flagfunc
1946 1947 for i, l in (
1947 (addednodeid, status.added),
1948 (modifiednodeid, status.modified),
1948 (self._repo.nodeconstants.addednodeid, status.added),
1949 (self._repo.nodeconstants.modifiednodeid, status.modified),
1949 1950 ):
1950 1951 for f in l:
1951 1952 man[f] = i
@@ -2070,13 +2071,18 b' class committablefilectx(basefilectx):'
2070 2071 path = self.copysource()
2071 2072 if not path:
2072 2073 return None
2073 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2074 return (
2075 path,
2076 self._changectx._parents[0]._manifest.get(
2077 path, self._repo.nodeconstants.nullid
2078 ),
2079 )
2074 2080
2075 2081 def parents(self):
2076 2082 '''return parent filectxs, following copies if necessary'''
2077 2083
2078 2084 def filenode(ctx, path):
2079 return ctx._manifest.get(path, nullid)
2085 return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
2080 2086
2081 2087 path = self._path
2082 2088 fl = self._filelog
@@ -2094,7 +2100,7 b' class committablefilectx(basefilectx):'
2094 2100 return [
2095 2101 self._parentfilectx(p, fileid=n, filelog=l)
2096 2102 for p, n, l in pl
2097 if n != nullid
2103 if n != self._repo.nodeconstants.nullid
2098 2104 ]
2099 2105
2100 2106 def children(self):
@@ -2222,7 +2228,9 b' class overlayworkingctx(committablectx):'
2222 2228 # ``overlayworkingctx`` (e.g. with --collapse).
2223 2229 util.clearcachedproperty(self, b'_manifest')
2224 2230
2225 def setparents(self, p1node, p2node=nullid):
2231 def setparents(self, p1node, p2node=None):
2232 if p2node is None:
2233 p2node = self._repo.nodeconstants.nullid
2226 2234 assert p1node == self._wrappedctx.node()
2227 2235 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2228 2236
@@ -2248,10 +2256,10 b' class overlayworkingctx(committablectx):'
2248 2256
2249 2257 flag = self._flagfunc
2250 2258 for path in self.added():
2251 man[path] = addednodeid
2259 man[path] = self._repo.nodeconstants.addednodeid
2252 2260 man.setflag(path, flag(path))
2253 2261 for path in self.modified():
2254 man[path] = modifiednodeid
2262 man[path] = self._repo.nodeconstants.modifiednodeid
2255 2263 man.setflag(path, flag(path))
2256 2264 for path in self.removed():
2257 2265 del man[path]
@@ -2827,7 +2835,7 b' class memctx(committablectx):'
2827 2835 )
2828 2836 self._rev = None
2829 2837 self._node = None
2830 parents = [(p or nullid) for p in parents]
2838 parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
2831 2839 p1, p2 = parents
2832 2840 self._parents = [self._repo[p] for p in (p1, p2)]
2833 2841 files = sorted(set(files))
@@ -2866,10 +2874,10 b' class memctx(committablectx):'
2866 2874 man = pctx.manifest().copy()
2867 2875
2868 2876 for f in self._status.modified:
2869 man[f] = modifiednodeid
2877 man[f] = self._repo.nodeconstants.modifiednodeid
2870 2878
2871 2879 for f in self._status.added:
2872 man[f] = addednodeid
2880 man[f] = self._repo.nodeconstants.addednodeid
2873 2881
2874 2882 for f in self._status.removed:
2875 2883 if f in man:
@@ -3006,12 +3014,12 b' class metadataonlyctx(committablectx):'
3006 3014 # sanity check to ensure that the reused manifest parents are
3007 3015 # manifests of our commit parents
3008 3016 mp1, mp2 = self.manifestctx().parents
3009 if p1 != nullid and p1.manifestnode() != mp1:
3017 if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
3010 3018 raise RuntimeError(
3011 3019 r"can't reuse the manifest: its p1 "
3012 3020 r"doesn't match the new ctx p1"
3013 3021 )
3014 if p2 != nullid and p2.manifestnode() != mp2:
3022 if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
3015 3023 raise RuntimeError(
3016 3024 r"can't reuse the manifest: "
3017 3025 r"its p2 doesn't match the new ctx p2"
@@ -12,10 +12,7 b' import collections'
12 12 import os
13 13
14 14 from .i18n import _
15 from .node import (
16 nullid,
17 nullrev,
18 )
15 from .node import nullrev
19 16
20 17 from . import (
21 18 match as matchmod,
@@ -579,7 +576,7 b' def _revinfo_getter_extra(repo):'
579 576 parents = fctx._filelog.parents(fctx._filenode)
580 577 nb_parents = 0
581 578 for n in parents:
582 if n != nullid:
579 if n != repo.nullid:
583 580 nb_parents += 1
584 581 return nb_parents >= 2
585 582
@@ -30,7 +30,6 b' from .i18n import _'
30 30 from .node import (
31 31 bin,
32 32 hex,
33 nullid,
34 33 nullrev,
35 34 short,
36 35 )
@@ -1667,7 +1666,7 b' def debugindexdot(ui, repo, file_=None, '
1667 1666 node = r.node(i)
1668 1667 pp = r.parents(node)
1669 1668 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1670 if pp[1] != nullid:
1669 if pp[1] != repo.nullid:
1671 1670 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1672 1671 ui.write(b"}\n")
1673 1672
@@ -1675,7 +1674,7 b' def debugindexdot(ui, repo, file_=None, '
1675 1674 @command(b'debugindexstats', [])
1676 1675 def debugindexstats(ui, repo):
1677 1676 """show stats related to the changelog index"""
1678 repo.changelog.shortest(nullid, 1)
1677 repo.changelog.shortest(repo.nullid, 1)
1679 1678 index = repo.changelog.index
1680 1679 if not util.safehasattr(index, b'stats'):
1681 1680 raise error.Abort(_(b'debugindexstats only works with native code'))
@@ -2425,7 +2424,7 b' def debugobsolete(ui, repo, precursor=No'
2425 2424 # arbitrary node identifiers, possibly not present in the
2426 2425 # local repository.
2427 2426 n = bin(s)
2428 if len(n) != len(nullid):
2427 if len(n) != repo.nodeconstants.nodelen:
2429 2428 raise TypeError()
2430 2429 return n
2431 2430 except TypeError:
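Note the length check: len(n) != len(nullid) becomes a lookup of repo.nodeconstants.nodelen, so the validation keeps working if a repository ever uses a hash wider than sha1's 20 bytes. A standalone sketch of the same check; the parse_node helper is made up for illustration:

    import binascii

    SHA1_NODELEN = 20  # sha1nodeconstants.nodelen for today's repos

    def parse_node(hexnode, nodelen=SHA1_NODELEN):
        n = binascii.unhexlify(hexnode)
        if len(n) != nodelen:
            raise TypeError("expected a %d-byte node" % nodelen)
        return n

    assert parse_node(b"00" * 20) == b"\x00" * 20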
@@ -3328,7 +3327,7 b' def debugrevlogindex(ui, repo, file_=Non'
3328 3327 try:
3329 3328 pp = r.parents(node)
3330 3329 except Exception:
3331 pp = [nullid, nullid]
3330 pp = [repo.nullid, repo.nullid]
3332 3331 if ui.verbose:
3333 3332 ui.write(
3334 3333 b"% 6d % 9d % 7d % 7d %s %s %s\n"
@@ -3742,7 +3741,9 b' def debugbackupbundle(ui, repo, *pats, *'
3742 3741 for n in chlist:
3743 3742 if limit is not None and count >= limit:
3744 3743 break
3745 parents = [True for p in other.changelog.parents(n) if p != nullid]
3744 parents = [
3745 True for p in other.changelog.parents(n) if p != repo.nullid
3746 ]
3746 3747 if opts.get(b"no_merges") and len(parents) == 2:
3747 3748 continue
3748 3749 count += 1
@@ -14,7 +14,6 b' import os'
14 14 import stat
15 15
16 16 from .i18n import _
17 from .node import nullid
18 17 from .pycompat import delattr
19 18
20 19 from hgdemandimport import tracing
@@ -314,7 +313,7 b' class dirstate(object):'
314 313 def branch(self):
315 314 return encoding.tolocal(self._branch)
316 315
317 def setparents(self, p1, p2=nullid):
316 def setparents(self, p1, p2=None):
318 317 """Set dirstate parents to p1 and p2.
319 318
320 319 When moving from two parents to one, 'm' merged entries a
@@ -323,6 +322,8 b' class dirstate(object):'
323 322
324 323 See localrepo.setparents()
325 324 """
325 if p2 is None:
326 p2 = self._nodeconstants.nullid
326 327 if self._parentwriters == 0:
327 328 raise ValueError(
328 329 b"cannot set dirstate parent outside of "
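The ValueError guard above enforces the invariant that the later call sites rely on: dirstate parents may only be set inside a parentchange() scope. A usage sketch matching the shelve hunks near the end of this series (repo and newnode are assumed to be in scope):

    with repo.dirstate.parentchange():
        # p2 is omitted; it now defaults to None and falls back to the
        # repo's null node inside setparents()
        repo.dirstate.setparents(newnode)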
@@ -335,7 +336,10 b' class dirstate(object):'
335 336 self._origpl = self._pl
336 337 self._map.setparents(p1, p2)
337 338 copies = {}
338 if oldp2 != nullid and p2 == nullid:
339 if (
340 oldp2 != self._nodeconstants.nullid
341 and p2 == self._nodeconstants.nullid
342 ):
339 343 candidatefiles = self._map.nonnormalset.union(
340 344 self._map.otherparentset
341 345 )
@@ -459,7 +463,7 b' class dirstate(object):'
459 463
460 464 def normallookup(self, f):
461 465 '''Mark a file normal, but possibly dirty.'''
462 if self._pl[1] != nullid:
466 if self._pl[1] != self._nodeconstants.nullid:
463 467 # if there is a merge going on and the file was either
464 468 # in state 'm' (-1) or coming from other parent (-2) before
465 469 # being removed, restore that state.
@@ -481,7 +485,7 b' class dirstate(object):'
481 485
482 486 def otherparent(self, f):
483 487 '''Mark as coming from the other parent, always dirty.'''
484 if self._pl[1] == nullid:
488 if self._pl[1] == self._nodeconstants.nullid:
485 489 raise error.Abort(
486 490 _(b"setting %r to other parent only allowed in merges") % f
487 491 )
@@ -503,7 +507,7 b' class dirstate(object):'
503 507 self._dirty = True
504 508 oldstate = self[f]
505 509 size = 0
506 if self._pl[1] != nullid:
510 if self._pl[1] != self._nodeconstants.nullid:
507 511 entry = self._map.get(f)
508 512 if entry is not None:
509 513 # backup the previous state
@@ -519,7 +523,7 b' class dirstate(object):'
519 523
520 524 def merge(self, f):
521 525 '''Mark a file merged.'''
522 if self._pl[1] == nullid:
526 if self._pl[1] == self._nodeconstants.nullid:
523 527 return self.normallookup(f)
524 528 return self.otherparent(f)
525 529
@@ -638,7 +642,7 b' class dirstate(object):'
638 642
639 643 if self._origpl is None:
640 644 self._origpl = self._pl
641 self._map.setparents(parent, nullid)
645 self._map.setparents(parent, self._nodeconstants.nullid)
642 646
643 647 for f in to_lookup:
644 648 self.normallookup(f)
@@ -1459,7 +1463,7 b' class dirstatemap(object):'
1459 1463 def clear(self):
1460 1464 self._map.clear()
1461 1465 self.copymap.clear()
1462 self.setparents(nullid, nullid)
1466 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
1463 1467 util.clearcachedproperty(self, b"_dirs")
1464 1468 util.clearcachedproperty(self, b"_alldirs")
1465 1469 util.clearcachedproperty(self, b"filefoldmap")
@@ -1636,7 +1640,10 b' class dirstatemap(object):'
1636 1640 st[self._nodelen : 2 * self._nodelen],
1637 1641 )
1638 1642 elif l == 0:
1639 self._parents = (nullid, nullid)
1643 self._parents = (
1644 self._nodeconstants.nullid,
1645 self._nodeconstants.nullid,
1646 )
1640 1647 else:
1641 1648 raise error.Abort(
1642 1649 _(b'working directory state appears damaged!')
@@ -1794,7 +1801,9 b' if rustmod is not None:'
1794 1801 def clear(self):
1795 1802 self._rustmap.clear()
1796 1803 self._inner_rustmap.clear()
1797 self.setparents(nullid, nullid)
1804 self.setparents(
1805 self._nodeconstants.nullid, self._nodeconstants.nullid
1806 )
1798 1807 util.clearcachedproperty(self, b"_dirs")
1799 1808 util.clearcachedproperty(self, b"_alldirs")
1800 1809 util.clearcachedproperty(self, b"dirfoldmap")
@@ -12,7 +12,6 b' import functools'
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 nullid,
16 15 short,
17 16 )
18 17
@@ -107,7 +106,7 b' class outgoing(object):'
107 106 if missingroots:
108 107 discbases = []
109 108 for n in missingroots:
110 discbases.extend([p for p in cl.parents(n) if p != nullid])
109 discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
111 110 # TODO remove call to nodesbetween.
112 111 # TODO populate attributes on outgoing instance instead of setting
113 112 # discbases.
@@ -116,7 +115,7 b' class outgoing(object):'
116 115 ancestorsof = heads
117 116 commonheads = [n for n in discbases if n not in included]
118 117 elif not commonheads:
119 commonheads = [nullid]
118 commonheads = [repo.nullid]
120 119 self.commonheads = commonheads
121 120 self.ancestorsof = ancestorsof
122 121 self._revlog = cl
@@ -381,7 +380,7 b' def checkheads(pushop):'
381 380 # - a local outgoing head descended from update
382 381 # - a remote head that's known locally and not
383 382 # ancestral to an outgoing head
384 if remoteheads == [nullid]:
383 if remoteheads == [repo.nullid]:
385 384 # remote is empty, nothing to check.
386 385 return
387 386
@@ -13,7 +13,6 b' import weakref'
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 nullid,
17 16 nullrev,
18 17 )
19 18 from . import (
@@ -164,7 +163,7 b' def _computeoutgoing(repo, heads, common'
164 163 hasnode = cl.hasnode
165 164 common = [n for n in common if hasnode(n)]
166 165 else:
167 common = [nullid]
166 common = [repo.nullid]
168 167 if not heads:
169 168 heads = cl.heads()
170 169 return discovery.outgoing(repo, common, heads)
@@ -1839,7 +1838,7 b' def _pullbundle2(pullop):'
1839 1838 if (
1840 1839 pullop.remote.capable(b'clonebundles')
1841 1840 and pullop.heads is None
1842 and list(pullop.common) == [nullid]
1841 and list(pullop.common) == [pullop.repo.nullid]
1843 1842 ):
1844 1843 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1845 1844
@@ -1849,7 +1848,7 b' def _pullbundle2(pullop):'
1849 1848 pullop.repo.ui.status(_(b"no changes found\n"))
1850 1849 pullop.cgresult = 0
1851 1850 else:
1852 if pullop.heads is None and list(pullop.common) == [nullid]:
1851 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1853 1852 pullop.repo.ui.status(_(b"requesting all changes\n"))
1854 1853 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1855 1854 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
@@ -1920,7 +1919,7 b' def _pullchangeset(pullop):'
1920 1919 pullop.cgresult = 0
1921 1920 return
1922 1921 tr = pullop.gettransaction()
1923 if pullop.heads is None and list(pullop.common) == [nullid]:
1922 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1924 1923 pullop.repo.ui.status(_(b"requesting all changes\n"))
1925 1924 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
1926 1925 # issue1320, avoid a race if remote changed after discovery
@@ -11,10 +11,7 b' import collections'
11 11 import weakref
12 12
13 13 from .i18n import _
14 from .node import (
15 nullid,
16 short,
17 )
14 from .node import short
18 15 from . import (
19 16 bookmarks,
20 17 error,
@@ -304,7 +301,7 b' def _pullchangesetdiscovery(repo, remote'
304 301 if set(remoteheads).issubset(common):
305 302 fetch = []
306 303
307 common.discard(nullid)
304 common.discard(repo.nullid)
308 305
309 306 return common, fetch, remoteheads
310 307
@@ -413,7 +410,7 b' def _processchangesetdata(repo, tr, objs'
413 410 # Linknode is always itself for changesets.
414 411 cset[b'node'],
415 412 # We always send full revisions. So delta base is not set.
416 nullid,
413 repo.nullid,
417 414 mdiff.trivialdiffheader(len(data)) + data,
418 415 # Flags not yet supported.
419 416 0,
@@ -478,7 +475,7 b' def _fetchmanifests(repo, tr, remote, ma'
478 475 basenode = manifest[b'deltabasenode']
479 476 delta = extrafields[b'delta']
480 477 elif b'revision' in extrafields:
481 basenode = nullid
478 basenode = repo.nullid
482 479 revision = extrafields[b'revision']
483 480 delta = mdiff.trivialdiffheader(len(revision)) + revision
484 481 else:
@@ -610,7 +607,7 b' def _fetchfiles(repo, tr, remote, fnodes'
610 607 basenode = filerevision[b'deltabasenode']
611 608 delta = extrafields[b'delta']
612 609 elif b'revision' in extrafields:
613 basenode = nullid
610 basenode = repo.nullid
614 611 revision = extrafields[b'revision']
615 612 delta = mdiff.trivialdiffheader(len(revision)) + revision
616 613 else:
@@ -705,7 +702,7 b' def _fetchfilesfromcsets('
705 702 basenode = filerevision[b'deltabasenode']
706 703 delta = extrafields[b'delta']
707 704 elif b'revision' in extrafields:
708 basenode = nullid
705 basenode = repo.nullid
709 706 revision = extrafields[b'revision']
710 707 delta = mdiff.trivialdiffheader(len(revision)) + revision
711 708 else:
@@ -8,10 +8,7 b''
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 from .node import (
12 nullid,
13 nullrev,
14 )
11 from .node import nullrev
15 12 from . import (
16 13 error,
17 14 revlog,
@@ -42,7 +39,7 b' class filelog(object):'
42 39 return self._revlog.__iter__()
43 40
44 41 def hasnode(self, node):
45 if node in (nullid, nullrev):
42 if node in (self.nullid, nullrev):
46 43 return False
47 44
48 45 try:
@@ -15,7 +15,6 b' import shutil'
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 nullid,
19 18 short,
20 19 )
21 20 from .pycompat import (
@@ -111,7 +110,7 b' class absentfilectx(object):'
111 110 return None
112 111
113 112 def filenode(self):
114 return nullid
113 return self._ctx.repo().nullid
115 114
116 115 _customcmp = True
117 116
@@ -16,8 +16,7 b' import stat'
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 nullhex,
20 nullid,
19 sha1nodeconstants,
21 20 short,
22 21 )
23 22 from .pycompat import getattr
@@ -772,7 +771,7 b' def clone('
772 771 },
773 772 ).result()
774 773
775 if rootnode != nullid:
774 if rootnode != sha1nodeconstants.nullid:
776 775 sharepath = os.path.join(sharepool, hex(rootnode))
777 776 else:
778 777 ui.status(
@@ -883,7 +882,9 b' def clone('
883 882 # we need to re-init the repo after manually copying the data
884 883 # into it
885 884 destpeer = peer(srcrepo, peeropts, dest)
886 srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
885 srcrepo.hook(
886 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
887 )
887 888 else:
888 889 try:
889 890 # only pass ui when no srcrepo
@@ -1329,7 +1330,9 b' def incoming(ui, repo, source, opts, sub'
1329 1330 for n in chlist:
1330 1331 if limit is not None and count >= limit:
1331 1332 break
1332 parents = [p for p in other.changelog.parents(n) if p != nullid]
1333 parents = [
1334 p for p in other.changelog.parents(n) if p != repo.nullid
1335 ]
1333 1336 if opts.get(b'no_merges') and len(parents) == 2:
1334 1337 continue
1335 1338 count += 1
@@ -1406,7 +1409,7 b' def _outgoing_filter(repo, revs, opts):'
1406 1409 for n in revs:
1407 1410 if limit is not None and count >= limit:
1408 1411 break
1409 parents = [p for p in cl.parents(n) if p != nullid]
1412 parents = [p for p in cl.parents(n) if p != repo.nullid]
1410 1413 if no_merges and len(parents) == 2:
1411 1414 continue
1412 1415 count += 1
@@ -14,7 +14,7 b' import os'
14 14 import re
15 15
16 16 from ..i18n import _
17 from ..node import hex, nullid, short
17 from ..node import hex, short
18 18 from ..pycompat import setattr
19 19
20 20 from .common import (
@@ -220,7 +220,7 b' def _ctxsgen(context, ctxs):'
220 220 def _siblings(siblings=None, hiderev=None):
221 221 if siblings is None:
222 222 siblings = []
223 siblings = [s for s in siblings if s.node() != nullid]
223 siblings = [s for s in siblings if s.node() != s.repo().nullid]
224 224 if len(siblings) == 1 and siblings[0].rev() == hiderev:
225 225 siblings = []
226 226 return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
@@ -316,12 +316,16 b' def _nodenamesgen(context, f, node, name'
316 316 yield {name: t}
317 317
318 318
319 def showtag(repo, t1, node=nullid):
319 def showtag(repo, t1, node=None):
320 if node is None:
321 node = repo.nullid
320 322 args = (repo.nodetags, node, b'tag')
321 323 return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
322 324
323 325
324 def showbookmark(repo, t1, node=nullid):
326 def showbookmark(repo, t1, node=None):
327 if node is None:
328 node = repo.nullid
325 329 args = (repo.nodebookmarks, node, b'bookmark')
326 330 return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
327 331
@@ -2,8 +2,6 b' from __future__ import absolute_import, '
2 2
3 3 import contextlib
4 4
5 from .. import node as nodemod
6
7 5 from . import util as interfaceutil
8 6
9 7
@@ -97,7 +95,7 b' class idirstate(interfaceutil.Interface)'
97 95 def branch():
98 96 pass
99 97
100 def setparents(p1, p2=nodemod.nullid):
98 def setparents(p1, p2=None):
101 99 """Set dirstate parents to p1 and p2.
102 100
103 101 When moving from two parents to one, 'm' merged entries a
@@ -19,7 +19,6 b' from .i18n import _'
19 19 from .node import (
20 20 bin,
21 21 hex,
22 nullid,
23 22 nullrev,
24 23 sha1nodeconstants,
25 24 short,
@@ -1702,7 +1701,7 b' class localrepository(object):'
1702 1701 _(b"warning: ignoring unknown working parent %s!\n")
1703 1702 % short(node)
1704 1703 )
1705 return nullid
1704 return self.nullid
1706 1705
1707 1706 @storecache(narrowspec.FILENAME)
1708 1707 def narrowpats(self):
@@ -1753,9 +1752,9 b' class localrepository(object):'
1753 1752 @unfilteredpropertycache
1754 1753 def _quick_access_changeid_null(self):
1755 1754 return {
1756 b'null': (nullrev, nullid),
1757 nullrev: (nullrev, nullid),
1758 nullid: (nullrev, nullid),
1755 b'null': (nullrev, self.nodeconstants.nullid),
1756 nullrev: (nullrev, self.nodeconstants.nullid),
1757 self.nullid: (nullrev, self.nullid),
1759 1758 }
1760 1759
1761 1760 @unfilteredpropertycache
@@ -1765,7 +1764,7 b' class localrepository(object):'
1765 1764 quick = self._quick_access_changeid_null.copy()
1766 1765 cl = self.unfiltered().changelog
1767 1766 for node in self.dirstate.parents():
1768 if node == nullid:
1767 if node == self.nullid:
1769 1768 continue
1770 1769 rev = cl.index.get_rev(node)
1771 1770 if rev is None:
@@ -1785,7 +1784,7 b' class localrepository(object):'
1785 1784 quick[r] = pair
1786 1785 quick[n] = pair
1787 1786 p1node = self.dirstate.p1()
1788 if p1node != nullid:
1787 if p1node != self.nullid:
1789 1788 quick[b'.'] = quick[p1node]
1790 1789 return quick
1791 1790
@@ -2037,7 +2036,7 b' class localrepository(object):'
2037 2036 # local encoding.
2038 2037 tags = {}
2039 2038 for (name, (node, hist)) in pycompat.iteritems(alltags):
2040 if node != nullid:
2039 if node != self.nullid:
2041 2040 tags[encoding.tolocal(name)] = node
2042 2041 tags[b'tip'] = self.changelog.tip()
2043 2042 tagtypes = {
@@ -2161,7 +2160,9 b' class localrepository(object):'
2161 2160 def wjoin(self, f, *insidef):
2162 2161 return self.vfs.reljoin(self.root, f, *insidef)
2163 2162
2164 def setparents(self, p1, p2=nullid):
2163 def setparents(self, p1, p2=None):
2164 if p2 is None:
2165 p2 = self.nullid
2165 2166 self[None].setparents(p1, p2)
2166 2167 self._quick_access_changeid_invalidate()
2167 2168
@@ -3094,7 +3095,7 b' class localrepository(object):'
3094 3095 subrepoutil.writestate(self, newstate)
3095 3096
3096 3097 p1, p2 = self.dirstate.parents()
3097 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3098 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3098 3099 try:
3099 3100 self.hook(
3100 3101 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
@@ -3267,7 +3268,7 b' class localrepository(object):'
3267 3268 t = n
3268 3269 while True:
3269 3270 p = self.changelog.parents(n)
3270 if p[1] != nullid or p[0] == nullid:
3271 if p[1] != self.nullid or p[0] == self.nullid:
3271 3272 b.append((t, n, p[0], p[1]))
3272 3273 break
3273 3274 n = p[0]
@@ -3280,7 +3281,7 b' class localrepository(object):'
3280 3281 n, l, i = top, [], 0
3281 3282 f = 1
3282 3283
3283 while n != bottom and n != nullid:
3284 while n != bottom and n != self.nullid:
3284 3285 p = self.changelog.parents(n)[0]
3285 3286 if i == f:
3286 3287 l.append(n)
@@ -12,12 +12,7 b' import os'
12 12 import posixpath
13 13
14 14 from .i18n import _
15 from .node import (
16 nullid,
17 nullrev,
18 wdirid,
19 wdirrev,
20 )
15 from .node import nullrev, wdirrev
21 16
22 17 from .thirdparty import attr
23 18
@@ -357,7 +352,7 b' class changesetprinter(object):'
357 352 if self.ui.debugflag:
358 353 mnode = ctx.manifestnode()
359 354 if mnode is None:
360 mnode = wdirid
355 mnode = self.repo.nodeconstants.wdirid
361 356 mrev = wdirrev
362 357 else:
363 358 mrev = self.repo.manifestlog.rev(mnode)
@@ -505,7 +500,11 b' class changesetformatter(changesetprinte'
505 500 )
506 501
507 502 if self.ui.debugflag or b'manifest' in datahint:
508 fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
503 fm.data(
504 manifest=fm.hexfunc(
505 ctx.manifestnode() or self.repo.nodeconstants.wdirid
506 )
507 )
509 508 if self.ui.debugflag or b'extra' in datahint:
510 509 fm.data(extra=fm.formatdict(ctx.extra()))
511 510
@@ -991,7 +990,7 b' def _initialrevs(repo, wopts):'
991 990 """Return the initial set of revisions to be filtered or followed"""
992 991 if wopts.revspec:
993 992 revs = scmutil.revrange(repo, wopts.revspec)
994 elif wopts.follow and repo.dirstate.p1() == nullid:
993 elif wopts.follow and repo.dirstate.p1() == repo.nullid:
995 994 revs = smartset.baseset()
996 995 elif wopts.follow:
997 996 revs = repo.revs(b'.')
@@ -16,7 +16,6 b' from .i18n import _'
16 16 from .node import (
17 17 bin,
18 18 hex,
19 nullid,
20 19 nullrev,
21 20 )
22 21 from .pycompat import getattr
@@ -795,7 +794,10 b' class treemanifest(object):'
795 794 def __init__(self, nodeconstants, dir=b'', text=b''):
796 795 self._dir = dir
797 796 self.nodeconstants = nodeconstants
798 self._node = nullid
797 from .node import sha1nodeconstants
798
799 assert sha1nodeconstants == nodeconstants
800 self._node = self.nodeconstants.nullid
799 801 self._loadfunc = _noop
800 802 self._copyfunc = _noop
801 803 self._dirty = False
@@ -1391,7 +1393,7 b' class treemanifest(object):'
1391 1393 continue
1392 1394 subp1 = getnode(m1, d)
1393 1395 subp2 = getnode(m2, d)
1394 if subp1 == nullid:
1396 if subp1 == self.nodeconstants.nullid:
1395 1397 subp1, subp2 = subp2, subp1
1396 1398 writesubtree(subm, subp1, subp2, match)
1397 1399
@@ -1574,6 +1576,12 b' class manifestrevlog(object):'
1574 1576 value is passed in to the constructor.
1575 1577 """
1576 1578 self.nodeconstants = nodeconstants
1579 from .node import sha1nodeconstants
1580
1581 assert sha1nodeconstants == nodeconstants, (
1582 sha1nodeconstants,
1583 nodeconstants,
1584 )
1577 1585 # During normal operations, we expect to deal with not more than four
1578 1586 # revs at a time (such as during commit --amend). When rebasing large
1579 1587 # stacks of commits, the number can go up, hence the config knob below.
@@ -1929,6 +1937,9 b' class manifestlog(object):'
1929 1937
1930 1938 def __init__(self, opener, repo, rootstore, narrowmatch):
1931 1939 self.nodeconstants = repo.nodeconstants
1940 from .node import sha1nodeconstants
1941
1942 assert sha1nodeconstants == repo.nodeconstants
1932 1943 usetreemanifest = False
1933 1944 cachesize = 4
1934 1945
@@ -1994,7 +2005,7 b' class manifestlog(object):'
1994 2005 else:
1995 2006 m = manifestctx(self, node)
1996 2007
1997 if node != nullid:
2008 if node != self.nodeconstants.nullid:
1998 2009 mancache = self._dirmancache.get(tree)
1999 2010 if not mancache:
2000 2011 mancache = util.lrucachedict(self._cachesize)
@@ -2082,7 +2093,7 b' class manifestctx(object):'
2082 2093
2083 2094 def read(self):
2084 2095 if self._data is None:
2085 if self._node == nullid:
2096 if self._node == self._manifestlog.nodeconstants.nullid:
2086 2097 self._data = manifestdict()
2087 2098 else:
2088 2099 store = self._storage()
@@ -2188,7 +2199,7 b' class treemanifestctx(object):'
2188 2199 def read(self):
2189 2200 if self._data is None:
2190 2201 store = self._storage()
2191 if self._node == nullid:
2202 if self._node == self._manifestlog.nodeconstants.nullid:
2192 2203 self._data = treemanifest(self._manifestlog.nodeconstants)
2193 2204 # TODO accessing non-public API
2194 2205 elif store._treeondisk:
@@ -2296,6 +2307,9 b' class excludeddir(treemanifest):'
2296 2307
2297 2308 def __init__(self, nodeconstants, dir, node):
2298 2309 super(excludeddir, self).__init__(nodeconstants, dir)
2310 from .node import sha1nodeconstants
2311
2312 assert sha1nodeconstants == nodeconstants
2299 2313 self._node = node
2300 2314 # Add an empty file, which will be included by iterators and such,
2301 2315 # appearing as the directory itself (i.e. something like "dir/")
@@ -2316,6 +2330,9 b' class excludeddirmanifestctx(treemanifes'
2316 2330
2317 2331 def __init__(self, nodeconstants, dir, node):
2318 2332 self.nodeconstants = nodeconstants
2333 from .node import sha1nodeconstants
2334
2335 assert sha1nodeconstants == nodeconstants
2319 2336 self._dir = dir
2320 2337 self._node = node
2321 2338
@@ -2344,6 +2361,9 b' class excludedmanifestrevlog(manifestrev'
2344 2361
2345 2362 def __init__(self, nodeconstants, dir):
2346 2363 self.nodeconstants = nodeconstants
2364 from .node import sha1nodeconstants
2365
2366 assert sha1nodeconstants == nodeconstants
2347 2367 self._dir = dir
2348 2368
2349 2369 def __len__(self):
@@ -13,12 +13,7 b' import stat'
13 13 import struct
14 14
15 15 from .i18n import _
16 from .node import (
17 addednodeid,
18 modifiednodeid,
19 nullid,
20 nullrev,
21 )
16 from .node import nullrev
22 17 from .thirdparty import attr
23 18 from .utils import stringutil
24 19 from . import (
@@ -779,7 +774,7 b' def manifestmerge('
779 774 # to flag the change. If wctx is a committed revision, we shouldn't
780 775 # care for the dirty state of the working directory.
781 776 if any(wctx.sub(s).dirty() for s in wctx.substate):
782 m1[b'.hgsubstate'] = modifiednodeid
777 m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
783 778
784 779 # Don't use m2-vs-ma optimization if:
785 780 # - ma is the same as m1 or m2, which we're just going to diff again later
@@ -944,7 +939,7 b' def manifestmerge('
944 939 mresult.addcommitinfo(
945 940 f, b'merge-removal-candidate', b'yes'
946 941 )
947 elif n1 == addednodeid:
942 elif n1 == repo.nodeconstants.addednodeid:
948 943 # This file was locally added. We should forget it instead of
949 944 # deleting it.
950 945 mresult.addfile(
@@ -1785,7 +1780,7 b' def _advertisefsmonitor(repo, num_gets, '
1785 1780 if (
1786 1781 fsmonitorwarning
1787 1782 and not fsmonitorenabled
1788 and p1node == nullid
1783 and p1node == repo.nullid
1789 1784 and num_gets >= fsmonitorthreshold
1790 1785 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1791 1786 ):
@@ -1913,7 +1908,7 b' def _update('
1913 1908 else:
1914 1909 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1915 1910 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1916 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1911 pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
1917 1912 else:
1918 1913 pas = [p1.ancestor(p2, warn=branchmerge)]
1919 1914
@@ -2112,7 +2107,7 b' def _update('
2112 2107
2113 2108 ### apply phase
2114 2109 if not branchmerge: # just jump to the new rev
2115 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2110 fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
2116 2111 # If we're doing a partial update, we need to skip updating
2117 2112 # the dirstate.
2118 2113 always = matcher is None or matcher.always()
@@ -2281,14 +2276,14 b' def graft('
2281 2276 if keepconflictparent and stats.unresolvedcount:
2282 2277 pother = ctx.node()
2283 2278 else:
2284 pother = nullid
2279 pother = repo.nullid
2285 2280 parents = ctx.parents()
2286 2281 if keepparent and len(parents) == 2 and base in parents:
2287 2282 parents.remove(base)
2288 2283 pother = parents[0].node()
2289 2284 # Never set both parents equal to each other
2290 2285 if pother == pctx.node():
2291 pother = nullid
2286 pother = repo.nullid
2292 2287
2293 2288 if wctx.isinmemory():
2294 2289 wctx.setparents(pctx.node(), pother)
@@ -9,7 +9,6 b' from .i18n import _'
9 9 from .node import (
10 10 bin,
11 11 hex,
12 nullhex,
13 12 nullrev,
14 13 )
15 14 from . import (
@@ -32,7 +31,7 b' def _droponode(data):'
32 31
33 32
34 33 def _filectxorabsent(hexnode, ctx, f):
35 if hexnode == nullhex:
34 if hexnode == ctx.repo().nodeconstants.nullhex:
36 35 return filemerge.absentfilectx(ctx, f)
37 36 else:
38 37 return ctx[f]
@@ -248,7 +247,7 b' class _mergestate_base(object):'
248 247 note: also write the local version to the `.hg/merge` directory.
249 248 """
250 249 if fcl.isabsent():
251 localkey = nullhex
250 localkey = self._repo.nodeconstants.nullhex
252 251 else:
253 252 localkey = mergestate.getlocalkey(fcl.path())
254 253 self._make_backup(fcl, localkey)
@@ -354,7 +353,7 b' class _mergestate_base(object):'
354 353 flags = flo
355 354 if preresolve:
356 355 # restore local
357 if localkey != nullhex:
356 if localkey != self._repo.nodeconstants.nullhex:
358 357 self._restore_backup(wctx[dfile], localkey, flags)
359 358 else:
360 359 wctx[dfile].remove(ignoremissing=True)
@@ -658,7 +657,10 b' class mergestate(_mergestate_base):'
658 657 records.append(
659 658 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
660 659 )
661 elif v[1] == nullhex or v[6] == nullhex:
660 elif (
661 v[1] == self._repo.nodeconstants.nullhex
662 or v[6] == self._repo.nodeconstants.nullhex
663 ):
662 664 # Change/Delete or Delete/Change conflicts. These are stored in
663 665 # 'C' records. v[1] is the local file, and is nullhex when the
664 666 # file is deleted locally ('dc'). v[6] is the remote file, and
@@ -11,10 +11,7 b' from __future__ import absolute_import, '
11 11 import multiprocessing
12 12 import struct
13 13
14 from .node import (
15 nullid,
16 nullrev,
17 )
14 from .node import nullrev
18 15 from . import (
19 16 error,
20 17 pycompat,
@@ -617,7 +614,7 b' def computechangesetfilesmerged(ctx):'
617 614 if f in ctx:
618 615 fctx = ctx[f]
619 616 parents = fctx._filelog.parents(fctx._filenode)
620 if parents[1] != nullid:
617 if parents[1] != ctx.repo().nullid:
621 618 merged.append(f)
622 619 return merged
623 620
@@ -58,11 +58,11 b' class sha1nodeconstants(object):'
58 58
59 59
60 60 # legacy starting point for porting modules
61 nullid = sha1nodeconstants.nullid
62 nullhex = sha1nodeconstants.nullhex
63 newnodeid = sha1nodeconstants.newnodeid
64 addednodeid = sha1nodeconstants.addednodeid
65 modifiednodeid = sha1nodeconstants.modifiednodeid
66 wdirfilenodeids = sha1nodeconstants.wdirfilenodeids
67 wdirid = sha1nodeconstants.wdirid
68 wdirhex = sha1nodeconstants.wdirhex
61 # nullid = sha1nodeconstants.nullid
62 # nullhex = sha1nodeconstants.nullhex
63 # newnodeid = sha1nodeconstants.newnodeid
64 # addednodeid = sha1nodeconstants.addednodeid
65 # modifiednodeid = sha1nodeconstants.modifiednodeid
66 # wdirfilenodeids = sha1nodeconstants.wdirfilenodeids
67 # wdirid = sha1nodeconstants.wdirid
68 # wdirhex = sha1nodeconstants.wdirhex
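For reference, the class these retired aliases forwarded to looks roughly like this; the attribute names come straight from the aliases above, and the byte values are the long-standing sha1 sentinels (a sketch, not a verbatim copy of mercurial/node.py):

    class sha1nodeconstants(object):
        nodelen = 20

        nullid = b"\0" * nodelen          # the null revision
        nullhex = b"0" * (2 * nodelen)    # hex form of nullid

        # phony nodes standing in for working-directory file states
        newnodeid = b"!" * nodelen
        addednodeid = b"a" * nodelen
        modifiednodeid = b"m" * nodelen
        wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}

        wdirid = b"\xff" * nodelen        # the working-directory pseudo-node
        wdirhex = b"f" * (2 * nodelen)    # hex form of wdirid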
@@ -73,11 +73,10 b' import errno'
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from .pycompat import getattr
77 77 from .node import (
78 78 bin,
79 79 hex,
80 nullid,
81 80 )
82 81 from . import (
83 82 encoding,
@@ -526,14 +529,14 b' def _addchildren(children, markers):'
526 529 children.setdefault(p, set()).add(mark)
527 530
528 531
529 def _checkinvalidmarkers(markers):
532 def _checkinvalidmarkers(repo, markers):
530 533 """search for markers with invalid data and raise an error if needed
531 534
532 535 Exists as a separate function to allow the evolve extension a more
533 536 subtle handling.
534 537 """
535 538 for mark in markers:
536 if nullid in mark[1]:
539 if repo.nullid in mark[1]:
537 540 raise error.Abort(
538 541 _(
539 542 b'bad obsolescence marker detected: '
@@ -727,7 +730,7 b' class obsstore(object):'
727 730 return []
728 731 self._version, markers = _readmarkers(data)
729 732 markers = list(markers)
730 _checkinvalidmarkers(markers)
733 _checkinvalidmarkers(self.repo, markers)
731 734 return markers
732 735
733 736 @propertycache
@@ -761,7 +764,7 b' class obsstore(object):'
761 764 _addpredecessors(self.predecessors, markers)
762 765 if self._cached('children'):
763 766 _addchildren(self.children, markers)
764 _checkinvalidmarkers(markers)
767 _checkinvalidmarkers(self.repo, markers)
765 768
766 769 def relevantmarkers(self, nodes):
767 770 """return a set of all obsolescence markers relevant to a set of nodes.
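The docstring notes that the evolve extension supplies a more subtle override of this helper; since the repo is now threaded through as the first argument, any such wrapper has to grow it too. A hypothetical wrapper via extensions.wrapfunction (a sketch assuming the standard wrapping API; not part of this series):

    from mercurial import extensions, obsolete

    def _filteringcheck(orig, repo, markers):
        # extension-specific filtering would go here, then delegate
        return orig(repo, markers)

    def extsetup(ui):
        extensions.wrapfunction(obsolete, '_checkinvalidmarkers', _filteringcheck)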
@@ -20,7 +20,7 b' import zlib'
20 20 from .i18n import _
21 21 from .node import (
22 22 hex,
23 nullhex,
23 sha1nodeconstants,
24 24 short,
25 25 )
26 26 from .pycompat import open
@@ -3100,8 +3100,8 b' def diffcontent(data1, data2, header, bi'
3100 3100
3101 3101 ctx1, fctx1, path1, flag1, content1, date1 = data1
3102 3102 ctx2, fctx2, path2, flag2, content2, date2 = data2
3103 index1 = _gitindex(content1) if path1 in ctx1 else nullhex
3104 index2 = _gitindex(content2) if path2 in ctx2 else nullhex
3103 index1 = _gitindex(content1) if path1 in ctx1 else sha1nodeconstants.nullhex
3104 index2 = _gitindex(content2) if path2 in ctx2 else sha1nodeconstants.nullhex
3105 3105 if binary and opts.git and not opts.nobinary:
3106 3106 text = mdiff.b85diff(content1, content2)
3107 3107 if text:
@@ -109,7 +109,6 b' from .i18n import _'
109 109 from .node import (
110 110 bin,
111 111 hex,
112 nullid,
113 112 nullrev,
114 113 short,
115 114 wdirrev,
@@ -862,7 +861,7 b' def analyzeremotephases(repo, subset, ro'
862 861 node = bin(nhex)
863 862 phase = int(phase)
864 863 if phase == public:
865 if node != nullid:
864 if node != repo.nullid:
866 865 repo.ui.warn(
867 866 _(
868 867 b'ignoring inconsistent public root'
@@ -919,10 +918,10 b' def newheads(repo, heads, roots):'
919 918 rev = cl.index.get_rev
920 919 if not roots:
921 920 return heads
922 if not heads or heads == [nullid]:
921 if not heads or heads == [repo.nullid]:
923 922 return []
924 923 # The logic operated on revisions, convert arguments early for convenience
925 new_heads = {rev(n) for n in heads if n != nullid}
924 new_heads = {rev(n) for n in heads if n != repo.nullid}
926 925 roots = [rev(n) for n in roots]
927 926 # compute the area we need to remove
928 927 affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
@@ -10,7 +10,10 b' from __future__ import absolute_import'
10 10 import struct
11 11 import zlib
12 12
13 from ..node import nullid, nullrev
13 from ..node import (
14 nullrev,
15 sha1nodeconstants,
16 )
14 17 from .. import (
15 18 pycompat,
16 19 util,
@@ -50,7 +53,7 b' class BaseIndexObject(object):'
50 53 # Size of a C long int, platform independent
51 54 int_size = struct.calcsize(b'>i')
52 55 # An empty index entry, used as a default value to be overridden, or nullrev
53 null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
56 null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid)
54 57
55 58 @util.propertycache
56 59 def entry_size(self):
@@ -64,7 +67,7 b' class BaseIndexObject(object):'
64 67
65 68 @util.propertycache
66 69 def _nodemap(self):
67 nodemap = nodemaputil.NodeMap({nullid: nullrev})
70 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
68 71 for r in range(0, len(self)):
69 72 n = self[r][7]
70 73 nodemap[n] = r
@@ -246,7 +249,7 b' def parse_index2(data, inline, revlogv2='
246 249
247 250 class Index2Mixin(object):
248 251 index_format = revlog_constants.INDEX_ENTRY_V2
249 null_item = (0, 0, 0, -1, -1, -1, -1, nullid, 0, 0)
252 null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid, 0, 0)
250 253
251 254 def replace_sidedata_info(self, i, sidedata_offset, sidedata_length):
252 255 """
@@ -26,14 +26,9 b' import zlib'
26 26 from .node import (
27 27 bin,
28 28 hex,
29 nullhex,
30 nullid,
31 29 nullrev,
32 30 sha1nodeconstants,
33 31 short,
34 wdirfilenodeids,
35 wdirhex,
36 wdirid,
37 32 wdirrev,
38 33 )
39 34 from .i18n import _
@@ -232,7 +227,7 b' class revlogoldindex(list):'
232 227
233 228 @util.propertycache
234 229 def _nodemap(self):
235 nodemap = nodemaputil.NodeMap({nullid: nullrev})
230 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
236 231 for r in range(0, len(self)):
237 232 n = self[r][7]
238 233 nodemap[n] = r
@@ -270,7 +265,7 b' class revlogoldindex(list):'
270 265
271 266 def __getitem__(self, i):
272 267 if i == -1:
273 return (0, 0, 0, -1, -1, -1, -1, nullid)
268 return (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid)
274 269 return list.__getitem__(self, i)
275 270
276 271
@@ -278,7 +273,7 b' class revlogoldio(object):'
278 273 def parseindex(self, data, inline):
279 274 s = INDEX_ENTRY_V0.size
280 275 index = []
281 nodemap = nodemaputil.NodeMap({nullid: nullrev})
276 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
282 277 n = off = 0
283 278 l = len(data)
284 279 while off + s <= l:
@@ -818,7 +813,10 b' class revlog(object):'
818 813 raise
819 814 except error.RevlogError:
820 815 # parsers.c radix tree lookup failed
821 if node == wdirid or node in wdirfilenodeids:
816 if (
817 node == self.nodeconstants.wdirid
818 or node in self.nodeconstants.wdirfilenodeids
819 ):
822 820 raise error.WdirUnsupported
823 821 raise error.LookupError(node, self.indexfile, _(b'no node'))
824 822
@@ -909,7 +907,7 b' class revlog(object):'
909 907 i = self.index
910 908 d = i[self.rev(node)]
911 909 # inline node() to avoid function call overhead
912 if d[5] == nullid:
910 if d[5] == self.nullid:
913 911 return i[d[6]][7], i[d[5]][7]
914 912 else:
915 913 return i[d[5]][7], i[d[6]][7]
@@ -1027,7 +1025,7 b' class revlog(object):'
1027 1025 not supplied, uses all of the revlog's heads. If common is not
1028 1026 supplied, uses nullid."""
1029 1027 if common is None:
1030 common = [nullid]
1028 common = [self.nullid]
1031 1029 if heads is None:
1032 1030 heads = self.heads()
1033 1031
@@ -1133,7 +1131,7 b' class revlog(object):'
1133 1131 not supplied, uses all of the revlog's heads. If common is not
1134 1132 supplied, uses nullid."""
1135 1133 if common is None:
1136 common = [nullid]
1134 common = [self.nullid]
1137 1135 if heads is None:
1138 1136 heads = self.heads()
1139 1137
@@ -1171,11 +1169,15 b' class revlog(object):'
1171 1169 return nonodes
1172 1170 lowestrev = min([self.rev(n) for n in roots])
1173 1171 else:
1174 roots = [nullid] # Everybody's a descendant of nullid
1172 roots = [self.nullid] # Everybody's a descendant of nullid
1175 1173 lowestrev = nullrev
1176 1174 if (lowestrev == nullrev) and (heads is None):
1177 1175 # We want _all_ the nodes!
1178 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1176 return (
1177 [self.node(r) for r in self],
1178 [self.nullid],
1179 list(self.heads()),
1180 )
1179 1181 if heads is None:
1180 1182 # All nodes are ancestors, so the latest ancestor is the last
1181 1183 # node.
@@ -1201,7 +1203,7 b' class revlog(object):'
1201 1203 # grab a node to tag
1202 1204 n = nodestotag.pop()
1203 1205 # Never tag nullid
1204 if n == nullid:
1206 if n == self.nullid:
1205 1207 continue
1206 1208 # A node's revision number represents its place in a
1207 1209 # topologically sorted list of nodes.
@@ -1213,7 +1215,7 b' class revlog(object):'
1213 1215 ancestors.add(n) # Mark as ancestor
1214 1216 # Add non-nullid parents to list of nodes to tag.
1215 1217 nodestotag.update(
1216 [p for p in self.parents(n) if p != nullid]
1218 [p for p in self.parents(n) if p != self.nullid]
1217 1219 )
1218 1220 elif n in heads: # We've seen it before, is it a fake head?
1219 1221 # So it is, real heads should not be the ancestors of
@@ -1241,7 +1243,7 b' class revlog(object):'
1241 1243 # We are descending from nullid, and don't need to care about
1242 1244 # any other roots.
1243 1245 lowestrev = nullrev
1244 roots = [nullid]
1246 roots = [self.nullid]
1245 1247 # Transform our roots list into a set.
1246 1248 descendants = set(roots)
1247 1249 # Also, keep the original roots so we can filter out roots that aren't
@@ -1335,7 +1337,7 b' class revlog(object):'
1335 1337 """
1336 1338 if start is None and stop is None:
1337 1339 if not len(self):
1338 return [nullid]
1340 return [self.nullid]
1339 1341 return [self.node(r) for r in self.headrevs()]
1340 1342
1341 1343 if start is None:
@@ -1425,7 +1427,7 b' class revlog(object):'
1425 1427 if ancs:
1426 1428 # choose a consistent winner when there's a tie
1427 1429 return min(map(self.node, ancs))
1428 return nullid
1430 return self.nullid
1429 1431
1430 1432 def _match(self, id):
1431 1433 if isinstance(id, int):
@@ -1463,7 +1465,7 b' class revlog(object):'
1463 1465
1464 1466 def _partialmatch(self, id):
1465 1467 # we don't care wdirfilenodeids as they should be always full hash
1466 maybewdir = wdirhex.startswith(id)
1468 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1467 1469 try:
1468 1470 partial = self.index.partialmatch(id)
1469 1471 if partial and self.hasnode(partial):
@@ -1499,8 +1501,8 b' class revlog(object):'
1499 1501 nl = [
1500 1502 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1501 1503 ]
1502 if nullhex.startswith(id):
1503 nl.append(nullid)
1504 if self.nodeconstants.nullhex.startswith(id):
1505 nl.append(self.nullid)
1504 1506 if len(nl) > 0:
1505 1507 if len(nl) == 1 and not maybewdir:
1506 1508 self._pcache[id] = nl[0]
@@ -1560,13 +1562,13 b' class revlog(object):'
1560 1562 length = max(self.index.shortest(node), minlength)
1561 1563 return disambiguate(hexnode, length)
1562 1564 except error.RevlogError:
1563 if node != wdirid:
1565 if node != self.nodeconstants.wdirid:
1564 1566 raise error.LookupError(node, self.indexfile, _(b'no node'))
1565 1567 except AttributeError:
1566 1568 # Fall through to pure code
1567 1569 pass
1568 1570
1569 if node == wdirid:
1571 if node == self.nodeconstants.wdirid:
1570 1572 for length in range(minlength, len(hexnode) + 1):
1571 1573 prefix = hexnode[:length]
1572 1574 if isvalid(prefix):
@@ -1881,7 +1883,7 b' class revlog(object):'
1881 1883 rev = None
1882 1884
1883 1885 # fast path the special `nullid` rev
1884 if node == nullid:
1886 if node == self.nullid:
1885 1887 return b"", {}
1886 1888
1887 1889 # ``rawtext`` is the text as stored inside the revlog. Might be the
@@ -2302,11 +2304,14 b' class revlog(object):'
2302 2304 - rawtext is optional (can be None); if not set, cachedelta must be set.
2303 2305 if both are set, they must correspond to each other.
2304 2306 """
2305 if node == nullid:
2307 if node == self.nullid:
2306 2308 raise error.RevlogError(
2307 2309 _(b"%s: attempt to add null revision") % self.indexfile
2308 2310 )
2309 if node == wdirid or node in wdirfilenodeids:
2311 if (
2312 node == self.nodeconstants.wdirid
2313 or node in self.nodeconstants.wdirfilenodeids
2314 ):
2310 2315 raise error.RevlogError(
2311 2316 _(b"%s: attempt to add wdir revision") % self.indexfile
2312 2317 )
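The reshaped condition above guards against ever storing the synthetic nodes. The same check in isolation, as a sketch (hypothetical helper; error type simplified to ValueError):

    def check_storable(node, nc, indexfile=b'data/example.i'):
        """Reject the null node and all working-directory pseudo-nodes."""
        if node == nc.nullid:
            raise ValueError(b'%s: attempt to add null revision' % indexfile)
        if node == nc.wdirid or node in nc.wdirfilenodeids:
            raise ValueError(b'%s: attempt to add wdir revision' % indexfile)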
@@ -19,10 +19,8 b' from .i18n import _'
19 19 from .node import (
20 20 bin,
21 21 hex,
22 nullid,
23 22 nullrev,
24 23 short,
25 wdirid,
26 24 wdirrev,
27 25 )
28 26 from .pycompat import getattr
@@ -450,7 +448,7 b' def binnode(ctx):'
450 448 """Return binary node id for a given basectx"""
451 449 node = ctx.node()
452 450 if node is None:
453 return wdirid
451 return ctx.repo().nodeconstants.wdirid
454 452 return node
455 453
456 454
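binnode() is the one place where a context without a node (the working directory) gets a concrete hash; the change merely sources wdirid from the repo's nodeconstants instead of the module global. Sketched standalone (hypothetical signature):

    def binnode(node, nc):
        """Map a context's node to binary; None means the working dir."""
        return nc.wdirid if node is None else node

    # binnode(None, sha1nodeconstants) == b'\xff' * 20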
@@ -1108,7 +1106,7 b' def cleanupnodes('
1108 1106 if roots:
1109 1107 newnode = roots[0].node()
1110 1108 else:
1111 newnode = nullid
1109 newnode = repo.nullid
1112 1110 else:
1113 1111 newnode = newnodes[0]
1114 1112 moves[oldnode] = newnode
@@ -1506,7 +1504,7 b' def movedirstate(repo, newctx, match=Non'
1506 1504 oldctx = repo[b'.']
1507 1505 ds = repo.dirstate
1508 1506 copies = dict(ds.copies())
1509 ds.setparents(newctx.node(), nullid)
1507 ds.setparents(newctx.node(), repo.nullid)
1510 1508 s = newctx.status(oldctx, match=match)
1511 1509 for f in s.modified:
1512 1510 if ds[f] == b'r':
@@ -46,10 +46,7 b' import collections'
46 46 import random
47 47
48 48 from .i18n import _
49 from .node import (
50 nullid,
51 nullrev,
52 )
49 from .node import nullrev
53 50 from . import (
54 51 error,
55 52 policy,
@@ -391,9 +388,9 b' def findcommonheads('
391 388 audit[b'total-roundtrips'] = 1
392 389
393 390 if cl.tiprev() == nullrev:
394 if srvheadhashes != [nullid]:
395 return [nullid], True, srvheadhashes
396 return [nullid], False, []
391 if srvheadhashes != [cl.nullid]:
392 return [cl.nullid], True, srvheadhashes
393 return [cl.nullid], False, []
397 394 else:
398 395 # we still need the remote head for the function return
399 396 with remote.commandexecutor() as e:
@@ -406,7 +403,7 b' def findcommonheads('
406 403
407 404 knownsrvheads = [] # revnos of remote heads that are known locally
408 405 for node in srvheadhashes:
409 if node == nullid:
406 if node == cl.nullid:
410 407 continue
411 408
412 409 try:
@@ -503,17 +500,17 b' def findcommonheads('
503 500 if audit is not None:
504 501 audit[b'total-roundtrips'] = roundtrips
505 502
506 if not result and srvheadhashes != [nullid]:
503 if not result and srvheadhashes != [cl.nullid]:
507 504 if abortwhenunrelated:
508 505 raise error.Abort(_(b"repository is unrelated"))
509 506 else:
510 507 ui.warn(_(b"warning: repository is unrelated\n"))
511 508 return (
512 {nullid},
509 {cl.nullid},
513 510 True,
514 511 srvheadhashes,
515 512 )
516 513
517 anyincoming = srvheadhashes != [nullid]
514 anyincoming = srvheadhashes != [cl.nullid]
518 515 result = {clnode(r) for r in result}
519 516 return result, anyincoming, srvheadhashes
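The empty-local fast path above has simple semantics: with no local revisions, the only common point is the null node, and whether anything is incoming reduces to whether the server advertises any real head. A sketch under those assumptions (hypothetical helper; nullrev is -1 as in mercurial):

    def empty_local_discovery(tiprev, srvheadhashes, nc):
        """Return (common heads, anyincoming, server heads) for an
        empty local repo, mirroring the hunk above."""
        nullrev = -1
        assert tiprev == nullrev, 'sketch only covers the empty case'
        if srvheadhashes != [nc.nullid]:
            return [nc.nullid], True, srvheadhashes
        return [nc.nullid], False, []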
@@ -31,7 +31,6 b' from .i18n import _'
31 31 from .node import (
32 32 bin,
33 33 hex,
34 nullid,
35 34 nullrev,
36 35 )
37 36 from . import (
@@ -822,7 +821,7 b' def unshelvecontinue(ui, repo, state, op'
822 821 pendingctx = state.pendingctx
823 822
824 823 with repo.dirstate.parentchange():
825 repo.setparents(state.pendingctx.node(), nullid)
824 repo.setparents(state.pendingctx.node(), repo.nullid)
826 825 repo.dirstate.write(repo.currenttransaction())
827 826
828 827 targetphase = phases.internal
@@ -831,7 +830,7 b' def unshelvecontinue(ui, repo, state, op'
831 830 overrides = {(b'phases', b'new-commit'): targetphase}
832 831 with repo.ui.configoverride(overrides, b'unshelve'):
833 832 with repo.dirstate.parentchange():
834 repo.setparents(state.parents[0], nullid)
833 repo.setparents(state.parents[0], repo.nullid)
835 834 newnode, ispartialunshelve = _createunshelvectx(
836 835 ui, repo, shelvectx, basename, interactive, opts
837 836 )
@@ -1027,7 +1026,7 b' def _rebaserestoredcommit('
1027 1026 raise error.ConflictResolutionRequired(b'unshelve')
1028 1027
1029 1028 with repo.dirstate.parentchange():
1030 repo.setparents(tmpwctx.node(), nullid)
1029 repo.setparents(tmpwctx.node(), repo.nullid)
1031 1030 newnode, ispartialunshelve = _createunshelvectx(
1032 1031 ui, repo, shelvectx, basename, interactive, opts
1033 1032 )
@@ -10,10 +10,7 b' from __future__ import absolute_import'
10 10 import os
11 11
12 12 from .i18n import _
13 from .node import (
14 hex,
15 nullid,
16 )
13 from .node import hex
17 14 from . import (
18 15 error,
19 16 match as matchmod,
@@ -177,7 +174,7 b' def activeconfig(repo):'
177 174 revs = [
178 175 repo.changelog.rev(node)
179 176 for node in repo.dirstate.parents()
180 if node != nullid
177 if node != repo.nullid
181 178 ]
182 179
183 180 allincludes = set()
@@ -321,7 +318,7 b' def matcher(repo, revs=None, includetemp'
321 318 revs = [
322 319 repo.changelog.rev(node)
323 320 for node in repo.dirstate.parents()
324 if node != nullid
321 if node != repo.nullid
325 322 ]
326 323
327 324 signature = configsignature(repo, includetemp=includetemp)
@@ -2,7 +2,6 b' from __future__ import absolute_import'
2 2
3 3 from .i18n import _
4 4 from .pycompat import getattr
5 from .node import nullid
6 5 from . import (
7 6 bookmarks as bookmarksmod,
8 7 cmdutil,
@@ -39,7 +38,7 b' def _findupdatetarget(repo, nodes):'
39 38
40 39 if (
41 40 util.safehasattr(repo, b'mq')
42 and p2 != nullid
41 and p2 != repo.nullid
43 42 and p2 in [x.node for x in repo.mq.applied]
44 43 ):
45 44 unode = p2
@@ -218,7 +217,7 b' def debugstrip(ui, repo, *revs, **opts):'
218 217 # if one of the wdir parent is stripped we'll need
219 218 # to update away to an earlier revision
220 219 update = any(
221 p != nullid and cl.rev(p) in strippedrevs
220 p != repo.nullid and cl.rev(p) in strippedrevs
222 221 for p in repo.dirstate.parents()
223 222 )
224 223
@@ -21,7 +21,6 b' from .i18n import _'
21 21 from .node import (
22 22 bin,
23 23 hex,
24 nullid,
25 24 short,
26 25 )
27 26 from . import (
@@ -686,7 +685,7 b' class hgsubrepo(abstractsubrepo):'
686 685 # we can't fully delete the repository as it may contain
687 686 # local-only history
688 687 self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self))
689 hg.clean(self._repo, nullid, False)
688 hg.clean(self._repo, self._repo.nullid, False)
690 689
691 690 def _get(self, state):
692 691 source, revision, kind = state
@@ -74,9 +74,6 b''
74 74 from __future__ import absolute_import
75 75
76 76 from .i18n import _
77 from .node import (
78 nullhex,
79 )
80 77 from . import (
81 78 tags as tagsmod,
82 79 util,
@@ -243,8 +240,8 b' def merge(repo, fcd, fco, fca):'
243 240 pnlosttagset = basetagset - pntagset
244 241 for t in pnlosttagset:
245 242 pntags[t] = basetags[t]
246 if pntags[t][-1][0] != nullhex:
247 pntags[t].append([nullhex, None])
243 if pntags[t][-1][0] != repo.nodeconstants.nullhex:
244 pntags[t].append([repo.nodeconstants.nullhex, None])
248 245
249 246 conflictedtags = [] # for reporting purposes
250 247 mergedtags = util.sortdict(p1tags)
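Context for the nullhex use above: each .hgtags line is '<hexnode> <tag>', and a tag whose most recent entry carries the all-zero hash reads as deleted. The merge restores base history for tags a parent lost and caps it with such a deletion marker. That step in isolation (hypothetical helper):

    def mark_lost_tags(pntags, basetags, lost, nullhex):
        """Restore base history for tags the parent lost, ending each
        with a nullhex entry so the tag reads as deleted."""
        for t in lost:
            pntags[t] = list(basetags[t])
            if pntags[t][-1][0] != nullhex:
                pntags[t].append([nullhex, None])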
@@ -18,7 +18,6 b' import io'
18 18 from .node import (
19 19 bin,
20 20 hex,
21 nullid,
22 21 nullrev,
23 22 short,
24 23 )
@@ -96,12 +95,12 b' def fnoderevs(ui, repo, revs):'
96 95 return fnodes
97 96
98 97
99 def _nulltonone(value):
98 def _nulltonone(repo, value):
100 99 """convert nullid to None
101 100
102 101 For tag value, nullid means "deleted". This small utility function helps
103 102 translating that to None."""
104 if value == nullid:
103 if value == repo.nullid:
105 104 return None
106 105 return value
107 106
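Since _nulltonone() now takes the repo, every caller threads it through (see the difftags hunk below). A quick standalone check of the new signature, with FakeRepo as an illustrative stand-in:

    def _nulltonone(repo, value):   # as in the hunk above
        return None if value == repo.nullid else value

    class FakeRepo(object):
        nullid = b"\0" * 20

    repo = FakeRepo()
    assert _nulltonone(repo, b"\0" * 20) is None          # deleted tag
    assert _nulltonone(repo, b"\xaa" * 20) == b"\xaa" * 20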
@@ -123,14 +122,14 b' def difftags(ui, repo, oldfnodes, newfno'
123 122 # list of (tag, old, new): None means missing
124 123 entries = []
125 124 for tag, (new, __) in newtags.items():
126 new = _nulltonone(new)
125 new = _nulltonone(repo, new)
127 126 old, __ = oldtags.pop(tag, (None, None))
128 old = _nulltonone(old)
127 old = _nulltonone(repo, old)
129 128 if old != new:
130 129 entries.append((tag, old, new))
131 130 # handle deleted tags
132 131 for tag, (old, __) in oldtags.items():
133 old = _nulltonone(old)
132 old = _nulltonone(repo, old)
134 133 if old is not None:
135 134 entries.append((tag, old, None))
136 135 entries.sort()
@@ -452,7 +451,7 b' def _readtagcache(ui, repo):'
452 451 repoheads = repo.heads()
453 452 # Case 2 (uncommon): empty repo; get out quickly and don't bother
454 453 # writing an empty cache.
455 if repoheads == [nullid]:
454 if repoheads == [repo.nullid]:
456 455 return ([], {}, valid, {}, False)
457 456
458 457 # Case 3 (uncommon): cache file missing or empty.
@@ -499,7 +498,7 b' def _getfnodes(ui, repo, nodes):'
499 498 for node in nodes:
500 499 fnode = fnodescache.getfnode(node)
501 500 flog = repo.file(b'.hgtags')
502 if fnode != nullid:
501 if fnode != repo.nullid:
503 502 if fnode not in validated_fnodes:
504 503 if flog.hasnode(fnode):
505 504 validated_fnodes.add(fnode)
@@ -510,7 +509,7 b' def _getfnodes(ui, repo, nodes):'
510 509 if unknown_entries:
511 510 fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
512 511 for node, fnode in pycompat.iteritems(fixed_nodemap):
513 if fnode != nullid:
512 if fnode != repo.nullid:
514 513 cachefnode[node] = fnode
515 514
516 515 fnodescache.write()
@@ -632,7 +631,7 b' def _tag('
632 631 m = name
633 632
634 633 if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
635 old = repo.tags().get(name, nullid)
634 old = repo.tags().get(name, repo.nullid)
636 635 fp.write(b'%s %s\n' % (hex(old), m))
637 636 fp.write(b'%s %s\n' % (hex(node), m))
638 637 fp.close()
@@ -762,8 +761,8 b' class hgtagsfnodescache(object):'
762 761 If a .hgtags file does not exist at the specified revision, nullid is
763 762 returned.
764 763 """
765 if node == nullid:
766 return nullid
764 if node == self._repo.nullid:
765 return node
767 766
768 767 ctx = self._repo[node]
769 768 rev = ctx.rev()
@@ -826,7 +825,7 b' class hgtagsfnodescache(object):'
826 825 fnode = ctx.filenode(b'.hgtags')
827 826 except error.LookupError:
828 827 # No .hgtags file on this revision.
829 fnode = nullid
828 fnode = self._repo.nullid
830 829 return fnode
831 830
832 831 def setfnode(self, node, fnode):
@@ -10,10 +10,7 b' from __future__ import absolute_import'
10 10 import re
11 11
12 12 from .i18n import _
13 from .node import (
14 bin,
15 wdirid,
16 )
13 from .node import bin
17 14 from . import (
18 15 color,
19 16 dagop,
@@ -778,7 +775,7 b' def shortest(context, mapping, args):'
778 775 try:
779 776 node = scmutil.resolvehexnodeidprefix(repo, hexnode)
780 777 except error.WdirUnsupported:
781 node = wdirid
778 node = repo.nodeconstants.wdirid
782 779 except error.LookupError:
783 780 return hexnode
784 781 if not node:
@@ -10,8 +10,6 b' from __future__ import absolute_import'
10 10 from .i18n import _
11 11 from .node import (
12 12 hex,
13 nullid,
14 wdirid,
15 13 wdirrev,
16 14 )
17 15
@@ -412,7 +410,7 b' def getgraphnode(repo, ctx, cache):'
412 410
413 411 def getgraphnodecurrent(repo, ctx, cache):
414 412 wpnodes = repo.dirstate.parents()
415 if wpnodes[1] == nullid:
413 if wpnodes[1] == repo.nullid:
416 414 wpnodes = wpnodes[:1]
417 415 if ctx.node() in wpnodes:
418 416 return b'@'
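The '@' rule above: a working directory normally has a null second parent, which is dropped before testing membership. Sketched standalone (hypothetical helper):

    def graph_marker(wpnodes, node, nc):
        """Return b'@' when node is a working-directory parent."""
        if wpnodes[1] == nc.nullid:   # single-parent working dir
            wpnodes = wpnodes[:1]
        return b'@' if node in wpnodes else None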
@@ -525,11 +523,12 b' def showmanifest(context, mapping):'
525 523 ctx = context.resource(mapping, b'ctx')
526 524 mnode = ctx.manifestnode()
527 525 if mnode is None:
528 mnode = wdirid
526 mnode = repo.nodeconstants.wdirid
529 527 mrev = wdirrev
528 mhex = repo.nodeconstants.wdirhex
530 529 else:
531 530 mrev = repo.manifestlog.rev(mnode)
532 mhex = hex(mnode)
531 mhex = hex(mnode)
533 532 mapping = context.overlaymap(mapping, {b'rev': mrev, b'node': mhex})
534 533 f = context.process(b'manifest', mapping)
535 534 return templateutil.hybriditem(
@@ -11,7 +11,6 b' import unittest'
11 11
12 12 from ..node import (
13 13 hex,
14 nullid,
15 14 nullrev,
16 15 )
17 16 from ..pycompat import getattr
@@ -51,7 +50,7 b' class ifileindextests(basetestcase):'
51 50 self.assertFalse(f.hasnode(None))
52 51 self.assertFalse(f.hasnode(0))
53 52 self.assertFalse(f.hasnode(nullrev))
54 self.assertFalse(f.hasnode(nullid))
53 self.assertFalse(f.hasnode(f.nullid))
55 54 self.assertFalse(f.hasnode(b'0'))
56 55 self.assertFalse(f.hasnode(b'a' * 20))
57 56
@@ -64,8 +63,8 b' class ifileindextests(basetestcase):'
64 63
65 64 self.assertEqual(list(f.revs(start=20)), [])
66 65
67 # parents() and parentrevs() work with nullid/nullrev.
68 self.assertEqual(f.parents(nullid), (nullid, nullid))
66 # parents() and parentrevs() work with f.nullid/nullrev.
67 self.assertEqual(f.parents(f.nullid), (f.nullid, f.nullid))
69 68 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
70 69
71 70 with self.assertRaises(error.LookupError):
@@ -78,9 +77,9 b' class ifileindextests(basetestcase):'
78 77 with self.assertRaises(IndexError):
79 78 f.parentrevs(i)
80 79
81 # nullid/nullrev lookup always works.
82 self.assertEqual(f.rev(nullid), nullrev)
83 self.assertEqual(f.node(nullrev), nullid)
80 # f.nullid/nullrev lookup always works.
81 self.assertEqual(f.rev(f.nullid), nullrev)
82 self.assertEqual(f.node(nullrev), f.nullid)
84 83
85 84 with self.assertRaises(error.LookupError):
86 85 f.rev(b'\x01' * 20)
@@ -92,16 +91,16 b' class ifileindextests(basetestcase):'
92 91 with self.assertRaises(IndexError):
93 92 f.node(i)
94 93
95 self.assertEqual(f.lookup(nullid), nullid)
96 self.assertEqual(f.lookup(nullrev), nullid)
97 self.assertEqual(f.lookup(hex(nullid)), nullid)
98 self.assertEqual(f.lookup(b'%d' % nullrev), nullid)
94 self.assertEqual(f.lookup(f.nullid), f.nullid)
95 self.assertEqual(f.lookup(nullrev), f.nullid)
96 self.assertEqual(f.lookup(hex(f.nullid)), f.nullid)
97 self.assertEqual(f.lookup(b'%d' % nullrev), f.nullid)
99 98
100 99 with self.assertRaises(error.LookupError):
101 100 f.lookup(b'badvalue')
102 101
103 102 with self.assertRaises(error.LookupError):
104 f.lookup(hex(nullid)[0:12])
103 f.lookup(hex(f.nullid)[0:12])
105 104
106 105 with self.assertRaises(error.LookupError):
107 106 f.lookup(b'-2')
@@ -140,19 +139,19 b' class ifileindextests(basetestcase):'
140 139 with self.assertRaises(IndexError):
141 140 f.iscensored(i)
142 141
143 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
142 self.assertEqual(list(f.commonancestorsheads(f.nullid, f.nullid)), [])
144 143
145 144 with self.assertRaises(ValueError):
146 145 self.assertEqual(list(f.descendants([])), [])
147 146
148 147 self.assertEqual(list(f.descendants([nullrev])), [])
149 148
150 self.assertEqual(f.heads(), [nullid])
151 self.assertEqual(f.heads(nullid), [nullid])
152 self.assertEqual(f.heads(None, [nullid]), [nullid])
153 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
149 self.assertEqual(f.heads(), [f.nullid])
150 self.assertEqual(f.heads(f.nullid), [f.nullid])
151 self.assertEqual(f.heads(None, [f.nullid]), [f.nullid])
152 self.assertEqual(f.heads(f.nullid, [f.nullid]), [f.nullid])
154 153
155 self.assertEqual(f.children(nullid), [])
154 self.assertEqual(f.children(f.nullid), [])
156 155
157 156 with self.assertRaises(error.LookupError):
158 157 f.children(b'\x01' * 20)
@@ -160,7 +159,7 b' class ifileindextests(basetestcase):'
160 159 def testsinglerevision(self):
161 160 f = self._makefilefn()
162 161 with self._maketransactionfn() as tr:
163 node = f.add(b'initial', None, tr, 0, nullid, nullid)
162 node = f.add(b'initial', None, tr, 0, f.nullid, f.nullid)
164 163
165 164 self.assertEqual(len(f), 1)
166 165 self.assertEqual(list(f), [0])
@@ -174,7 +173,7 b' class ifileindextests(basetestcase):'
174 173 self.assertTrue(f.hasnode(node))
175 174 self.assertFalse(f.hasnode(hex(node)))
176 175 self.assertFalse(f.hasnode(nullrev))
177 self.assertFalse(f.hasnode(nullid))
176 self.assertFalse(f.hasnode(f.nullid))
178 177 self.assertFalse(f.hasnode(node[0:12]))
179 178 self.assertFalse(f.hasnode(hex(node)[0:20]))
180 179
@@ -188,7 +187,7 b' class ifileindextests(basetestcase):'
188 187 self.assertEqual(list(f.revs(1, 0)), [1, 0])
189 188 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
190 189
191 self.assertEqual(f.parents(node), (nullid, nullid))
190 self.assertEqual(f.parents(node), (f.nullid, f.nullid))
192 191 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
193 192
194 193 with self.assertRaises(error.LookupError):
@@ -209,7 +208,7 b' class ifileindextests(basetestcase):'
209 208
210 209 self.assertEqual(f.lookup(node), node)
211 210 self.assertEqual(f.lookup(0), node)
212 self.assertEqual(f.lookup(-1), nullid)
211 self.assertEqual(f.lookup(-1), f.nullid)
213 212 self.assertEqual(f.lookup(b'0'), node)
214 213 self.assertEqual(f.lookup(hex(node)), node)
215 214
@@ -256,9 +255,9 b' class ifileindextests(basetestcase):'
256 255
257 256 f = self._makefilefn()
258 257 with self._maketransactionfn() as tr:
259 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
260 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
261 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
258 node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
259 node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
260 node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
262 261
263 262 self.assertEqual(len(f), 3)
264 263 self.assertEqual(list(f), [0, 1, 2])
@@ -284,9 +283,9 b' class ifileindextests(basetestcase):'
284 283 # TODO this is wrong
285 284 self.assertEqual(list(f.revs(3, 2)), [3, 2])
286 285
287 self.assertEqual(f.parents(node0), (nullid, nullid))
288 self.assertEqual(f.parents(node1), (node0, nullid))
289 self.assertEqual(f.parents(node2), (node1, nullid))
286 self.assertEqual(f.parents(node0), (f.nullid, f.nullid))
287 self.assertEqual(f.parents(node1), (node0, f.nullid))
288 self.assertEqual(f.parents(node2), (node1, f.nullid))
290 289
291 290 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
292 291 self.assertEqual(f.parentrevs(1), (0, nullrev))
@@ -330,7 +329,7 b' class ifileindextests(basetestcase):'
330 329 with self.assertRaises(IndexError):
331 330 f.iscensored(3)
332 331
333 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
332 self.assertEqual(f.commonancestorsheads(node1, f.nullid), [])
334 333 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
335 334 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
336 335 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
@@ -364,12 +363,12 b' class ifileindextests(basetestcase):'
364 363 f = self._makefilefn()
365 364
366 365 with self._maketransactionfn() as tr:
367 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
368 node1 = f.add(b'1', None, tr, 1, node0, nullid)
369 node2 = f.add(b'2', None, tr, 2, node1, nullid)
370 node3 = f.add(b'3', None, tr, 3, node0, nullid)
371 node4 = f.add(b'4', None, tr, 4, node3, nullid)
372 node5 = f.add(b'5', None, tr, 5, node0, nullid)
366 node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
367 node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
368 node2 = f.add(b'2', None, tr, 2, node1, f.nullid)
369 node3 = f.add(b'3', None, tr, 3, node0, f.nullid)
370 node4 = f.add(b'4', None, tr, 4, node3, f.nullid)
371 node5 = f.add(b'5', None, tr, 5, node0, f.nullid)
373 372
374 373 self.assertEqual(len(f), 6)
375 374
@@ -427,24 +426,24 b' class ifiledatatests(basetestcase):'
427 426 with self.assertRaises(IndexError):
428 427 f.size(i)
429 428
430 self.assertEqual(f.revision(nullid), b'')
431 self.assertEqual(f.rawdata(nullid), b'')
429 self.assertEqual(f.revision(f.nullid), b'')
430 self.assertEqual(f.rawdata(f.nullid), b'')
432 431
433 432 with self.assertRaises(error.LookupError):
434 433 f.revision(b'\x01' * 20)
435 434
436 self.assertEqual(f.read(nullid), b'')
435 self.assertEqual(f.read(f.nullid), b'')
437 436
438 437 with self.assertRaises(error.LookupError):
439 438 f.read(b'\x01' * 20)
440 439
441 self.assertFalse(f.renamed(nullid))
440 self.assertFalse(f.renamed(f.nullid))
442 441
443 442 with self.assertRaises(error.LookupError):
444 443 f.read(b'\x01' * 20)
445 444
446 self.assertTrue(f.cmp(nullid, b''))
447 self.assertTrue(f.cmp(nullid, b'foo'))
445 self.assertTrue(f.cmp(f.nullid, b''))
446 self.assertTrue(f.cmp(f.nullid, b'foo'))
448 447
449 448 with self.assertRaises(error.LookupError):
450 449 f.cmp(b'\x01' * 20, b'irrelevant')
@@ -455,7 +454,7 b' class ifiledatatests(basetestcase):'
455 454 next(gen)
456 455
457 456 # Emitting null node yields nothing.
458 gen = f.emitrevisions([nullid])
457 gen = f.emitrevisions([f.nullid])
459 458 with self.assertRaises(StopIteration):
460 459 next(gen)
461 460
@@ -468,7 +467,7 b' class ifiledatatests(basetestcase):'
468 467
469 468 f = self._makefilefn()
470 469 with self._maketransactionfn() as tr:
471 node = f.add(fulltext, None, tr, 0, nullid, nullid)
470 node = f.add(fulltext, None, tr, 0, f.nullid, f.nullid)
472 471
473 472 self.assertEqual(f.storageinfo(), {})
474 473 self.assertEqual(
@@ -496,10 +495,10 b' class ifiledatatests(basetestcase):'
496 495 rev = next(gen)
497 496
498 497 self.assertEqual(rev.node, node)
499 self.assertEqual(rev.p1node, nullid)
500 self.assertEqual(rev.p2node, nullid)
498 self.assertEqual(rev.p1node, f.nullid)
499 self.assertEqual(rev.p2node, f.nullid)
501 500 self.assertIsNone(rev.linknode)
502 self.assertEqual(rev.basenode, nullid)
501 self.assertEqual(rev.basenode, f.nullid)
503 502 self.assertIsNone(rev.baserevisionsize)
504 503 self.assertIsNone(rev.revision)
505 504 self.assertIsNone(rev.delta)
@@ -512,10 +511,10 b' class ifiledatatests(basetestcase):'
512 511 rev = next(gen)
513 512
514 513 self.assertEqual(rev.node, node)
515 self.assertEqual(rev.p1node, nullid)
516 self.assertEqual(rev.p2node, nullid)
514 self.assertEqual(rev.p1node, f.nullid)
515 self.assertEqual(rev.p2node, f.nullid)
517 516 self.assertIsNone(rev.linknode)
518 self.assertEqual(rev.basenode, nullid)
517 self.assertEqual(rev.basenode, f.nullid)
519 518 self.assertIsNone(rev.baserevisionsize)
520 519 self.assertEqual(rev.revision, fulltext)
521 520 self.assertIsNone(rev.delta)
@@ -534,9 +533,9 b' class ifiledatatests(basetestcase):'
534 533
535 534 f = self._makefilefn()
536 535 with self._maketransactionfn() as tr:
537 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
538 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
539 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
536 node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
537 node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
538 node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
540 539
541 540 self.assertEqual(f.storageinfo(), {})
542 541 self.assertEqual(
@@ -596,10 +595,10 b' class ifiledatatests(basetestcase):'
596 595 rev = next(gen)
597 596
598 597 self.assertEqual(rev.node, node0)
599 self.assertEqual(rev.p1node, nullid)
600 self.assertEqual(rev.p2node, nullid)
598 self.assertEqual(rev.p1node, f.nullid)
599 self.assertEqual(rev.p2node, f.nullid)
601 600 self.assertIsNone(rev.linknode)
602 self.assertEqual(rev.basenode, nullid)
601 self.assertEqual(rev.basenode, f.nullid)
603 602 self.assertIsNone(rev.baserevisionsize)
604 603 self.assertEqual(rev.revision, fulltext0)
605 604 self.assertIsNone(rev.delta)
@@ -608,7 +607,7 b' class ifiledatatests(basetestcase):'
608 607
609 608 self.assertEqual(rev.node, node1)
610 609 self.assertEqual(rev.p1node, node0)
611 self.assertEqual(rev.p2node, nullid)
610 self.assertEqual(rev.p2node, f.nullid)
612 611 self.assertIsNone(rev.linknode)
613 612 self.assertEqual(rev.basenode, node0)
614 613 self.assertIsNone(rev.baserevisionsize)
@@ -622,7 +621,7 b' class ifiledatatests(basetestcase):'
622 621
623 622 self.assertEqual(rev.node, node2)
624 623 self.assertEqual(rev.p1node, node1)
625 self.assertEqual(rev.p2node, nullid)
624 self.assertEqual(rev.p2node, f.nullid)
626 625 self.assertIsNone(rev.linknode)
627 626 self.assertEqual(rev.basenode, node1)
628 627 self.assertIsNone(rev.baserevisionsize)
@@ -641,10 +640,10 b' class ifiledatatests(basetestcase):'
641 640 rev = next(gen)
642 641
643 642 self.assertEqual(rev.node, node0)
644 self.assertEqual(rev.p1node, nullid)
645 self.assertEqual(rev.p2node, nullid)
643 self.assertEqual(rev.p1node, f.nullid)
644 self.assertEqual(rev.p2node, f.nullid)
646 645 self.assertIsNone(rev.linknode)
647 self.assertEqual(rev.basenode, nullid)
646 self.assertEqual(rev.basenode, f.nullid)
648 647 self.assertIsNone(rev.baserevisionsize)
649 648 self.assertEqual(rev.revision, fulltext0)
650 649 self.assertIsNone(rev.delta)
@@ -653,7 +652,7 b' class ifiledatatests(basetestcase):'
653 652
654 653 self.assertEqual(rev.node, node1)
655 654 self.assertEqual(rev.p1node, node0)
656 self.assertEqual(rev.p2node, nullid)
655 self.assertEqual(rev.p2node, f.nullid)
657 656 self.assertIsNone(rev.linknode)
658 657 self.assertEqual(rev.basenode, node0)
659 658 self.assertIsNone(rev.baserevisionsize)
@@ -667,7 +666,7 b' class ifiledatatests(basetestcase):'
667 666
668 667 self.assertEqual(rev.node, node2)
669 668 self.assertEqual(rev.p1node, node1)
670 self.assertEqual(rev.p2node, nullid)
669 self.assertEqual(rev.p2node, f.nullid)
671 670 self.assertIsNone(rev.linknode)
672 671 self.assertEqual(rev.basenode, node1)
673 672 self.assertIsNone(rev.baserevisionsize)
@@ -700,16 +699,16 b' class ifiledatatests(basetestcase):'
700 699 rev = next(gen)
701 700 self.assertEqual(rev.node, node2)
702 701 self.assertEqual(rev.p1node, node1)
703 self.assertEqual(rev.p2node, nullid)
704 self.assertEqual(rev.basenode, nullid)
702 self.assertEqual(rev.p2node, f.nullid)
703 self.assertEqual(rev.basenode, f.nullid)
705 704 self.assertIsNone(rev.baserevisionsize)
706 705 self.assertEqual(rev.revision, fulltext2)
707 706 self.assertIsNone(rev.delta)
708 707
709 708 rev = next(gen)
710 709 self.assertEqual(rev.node, node0)
711 self.assertEqual(rev.p1node, nullid)
712 self.assertEqual(rev.p2node, nullid)
710 self.assertEqual(rev.p1node, f.nullid)
711 self.assertEqual(rev.p2node, f.nullid)
713 712 # Delta behavior is storage dependent, so we can't easily test it.
714 713
715 714 with self.assertRaises(StopIteration):
@@ -722,8 +721,8 b' class ifiledatatests(basetestcase):'
722 721 rev = next(gen)
723 722 self.assertEqual(rev.node, node1)
724 723 self.assertEqual(rev.p1node, node0)
725 self.assertEqual(rev.p2node, nullid)
726 self.assertEqual(rev.basenode, nullid)
724 self.assertEqual(rev.p2node, f.nullid)
725 self.assertEqual(rev.basenode, f.nullid)
727 726 self.assertIsNone(rev.baserevisionsize)
728 727 self.assertEqual(rev.revision, fulltext1)
729 728 self.assertIsNone(rev.delta)
@@ -731,7 +730,7 b' class ifiledatatests(basetestcase):'
731 730 rev = next(gen)
732 731 self.assertEqual(rev.node, node2)
733 732 self.assertEqual(rev.p1node, node1)
734 self.assertEqual(rev.p2node, nullid)
733 self.assertEqual(rev.p2node, f.nullid)
735 734 self.assertEqual(rev.basenode, node1)
736 735 self.assertIsNone(rev.baserevisionsize)
737 736 self.assertIsNone(rev.revision)
@@ -751,7 +750,7 b' class ifiledatatests(basetestcase):'
751 750 rev = next(gen)
752 751 self.assertEqual(rev.node, node1)
753 752 self.assertEqual(rev.p1node, node0)
754 self.assertEqual(rev.p2node, nullid)
753 self.assertEqual(rev.p2node, f.nullid)
755 754 self.assertEqual(rev.basenode, node0)
756 755 self.assertIsNone(rev.baserevisionsize)
757 756 self.assertIsNone(rev.revision)
@@ -768,9 +767,9 b' class ifiledatatests(basetestcase):'
768 767
769 768 rev = next(gen)
770 769 self.assertEqual(rev.node, node0)
771 self.assertEqual(rev.p1node, nullid)
772 self.assertEqual(rev.p2node, nullid)
773 self.assertEqual(rev.basenode, nullid)
770 self.assertEqual(rev.p1node, f.nullid)
771 self.assertEqual(rev.p2node, f.nullid)
772 self.assertEqual(rev.basenode, f.nullid)
774 773 self.assertIsNone(rev.baserevisionsize)
775 774 self.assertIsNone(rev.revision)
776 775 self.assertEqual(
@@ -789,9 +788,9 b' class ifiledatatests(basetestcase):'
789 788
790 789 rev = next(gen)
791 790 self.assertEqual(rev.node, node0)
792 self.assertEqual(rev.p1node, nullid)
793 self.assertEqual(rev.p2node, nullid)
794 self.assertEqual(rev.basenode, nullid)
791 self.assertEqual(rev.p1node, f.nullid)
792 self.assertEqual(rev.p2node, f.nullid)
793 self.assertEqual(rev.basenode, f.nullid)
795 794 self.assertIsNone(rev.baserevisionsize)
796 795 self.assertIsNone(rev.revision)
797 796 self.assertEqual(
@@ -802,7 +801,7 b' class ifiledatatests(basetestcase):'
802 801 rev = next(gen)
803 802 self.assertEqual(rev.node, node2)
804 803 self.assertEqual(rev.p1node, node1)
805 self.assertEqual(rev.p2node, nullid)
804 self.assertEqual(rev.p2node, f.nullid)
806 805 self.assertEqual(rev.basenode, node0)
807 806
808 807 with self.assertRaises(StopIteration):
@@ -841,11 +840,11 b' class ifiledatatests(basetestcase):'
841 840
842 841 f = self._makefilefn()
843 842 with self._maketransactionfn() as tr:
844 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
845 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
846 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
843 node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
844 node1 = f.add(fulltext1, meta1, tr, 1, node0, f.nullid)
845 node2 = f.add(fulltext2, meta2, tr, 2, f.nullid, f.nullid)
847 846
848 # Metadata header isn't recognized when parent isn't nullid.
847 # Metadata header isn't recognized when parent isn't f.nullid.
849 848 self.assertEqual(f.size(1), len(stored1))
850 849 self.assertEqual(f.size(2), len(fulltext2))
851 850
@@ -886,8 +885,8 b' class ifiledatatests(basetestcase):'
886 885
887 886 f = self._makefilefn()
888 887 with self._maketransactionfn() as tr:
889 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
890 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
888 node0 = f.add(fulltext0, {}, tr, 0, f.nullid, f.nullid)
889 node1 = f.add(fulltext1, meta1, tr, 1, f.nullid, f.nullid)
891 890
892 891 # TODO this is buggy.
893 892 self.assertEqual(f.size(0), len(fulltext0) + 4)
@@ -916,15 +915,15 b' class ifiledatatests(basetestcase):'
916 915 fulltext1 = fulltext0 + b'bar\n'
917 916
918 917 with self._maketransactionfn() as tr:
919 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
918 node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
920 919 node1 = b'\xaa' * 20
921 920
922 921 self._addrawrevisionfn(
923 f, tr, node1, node0, nullid, 1, rawtext=fulltext1
922 f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
924 923 )
925 924
926 925 self.assertEqual(len(f), 2)
927 self.assertEqual(f.parents(node1), (node0, nullid))
926 self.assertEqual(f.parents(node1), (node0, f.nullid))
928 927
929 928 # revision() raises since it performs hash verification.
930 929 with self.assertRaises(error.StorageError):
@@ -951,11 +950,11 b' class ifiledatatests(basetestcase):'
951 950 fulltext1 = fulltext0 + b'bar\n'
952 951
953 952 with self._maketransactionfn() as tr:
954 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
953 node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
955 954 node1 = b'\xaa' * 20
956 955
957 956 self._addrawrevisionfn(
958 f, tr, node1, node0, nullid, 1, rawtext=fulltext1
957 f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
959 958 )
960 959
961 960 with self.assertRaises(error.StorageError):
@@ -973,11 +972,11 b' class ifiledatatests(basetestcase):'
973 972 fulltext1 = fulltext0 + b'bar\n'
974 973
975 974 with self._maketransactionfn() as tr:
976 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
975 node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
977 976 node1 = b'\xaa' * 20
978 977
979 978 self._addrawrevisionfn(
980 f, tr, node1, node0, nullid, 1, rawtext=fulltext1
979 f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
981 980 )
982 981
983 982 with self.assertRaises(error.StorageError):
@@ -994,22 +993,22 b' class ifiledatatests(basetestcase):'
994 993 fulltext2 = fulltext1 + b'baz\n'
995 994
996 995 with self._maketransactionfn() as tr:
997 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
996 node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
998 997 node1 = b'\xaa' * 20
999 998
1000 999 self._addrawrevisionfn(
1001 f, tr, node1, node0, nullid, 1, rawtext=fulltext1
1000 f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
1002 1001 )
1003 1002
1004 1003 with self.assertRaises(error.StorageError):
1005 1004 f.read(node1)
1006 1005
1007 node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)
1006 node2 = storageutil.hashrevisionsha1(fulltext2, node1, f.nullid)
1008 1007
1009 1008 with self._maketransactionfn() as tr:
1010 1009 delta = mdiff.textdiff(fulltext1, fulltext2)
1011 1010 self._addrawrevisionfn(
1012 f, tr, node2, node1, nullid, 2, delta=(1, delta)
1011 f, tr, node2, node1, f.nullid, 2, delta=(1, delta)
1013 1012 )
1014 1013
1015 1014 self.assertEqual(len(f), 3)
@@ -1029,13 +1028,13 b' class ifiledatatests(basetestcase):'
1029 1028 )
1030 1029
1031 1030 with self._maketransactionfn() as tr:
1032 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
1031 node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
1033 1032
1034 1033 # The node value doesn't matter since we can't verify it.
1035 1034 node1 = b'\xbb' * 20
1036 1035
1037 1036 self._addrawrevisionfn(
1038 f, tr, node1, node0, nullid, 1, stored1, censored=True
1037 f, tr, node1, node0, f.nullid, 1, stored1, censored=True
1039 1038 )
1040 1039
1041 1040 self.assertTrue(f.iscensored(1))
@@ -1063,13 +1062,13 b' class ifiledatatests(basetestcase):'
1063 1062 )
1064 1063
1065 1064 with self._maketransactionfn() as tr:
1066 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
1065 node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
1067 1066
1068 1067 # The node value doesn't matter since we can't verify it.
1069 1068 node1 = b'\xbb' * 20
1070 1069
1071 1070 self._addrawrevisionfn(
1072 f, tr, node1, node0, nullid, 1, stored1, censored=True
1071 f, tr, node1, node0, f.nullid, 1, stored1, censored=True
1073 1072 )
1074 1073
1075 1074 with self.assertRaises(error.CensoredNodeError):
@@ -1088,10 +1087,10 b' class ifilemutationtests(basetestcase):'
1088 1087 def testaddnoop(self):
1089 1088 f = self._makefilefn()
1090 1089 with self._maketransactionfn() as tr:
1091 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
1092 node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
1090 node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
1091 node1 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
1093 1092 # Varying by linkrev shouldn't impact hash.
1094 node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
1093 node2 = f.add(b'foo', None, tr, 1, f.nullid, f.nullid)
1095 1094
1096 1095 self.assertEqual(node1, node0)
1097 1096 self.assertEqual(node2, node0)
@@ -1102,7 +1101,9 b' class ifilemutationtests(basetestcase):'
1102 1101 with self._maketransactionfn() as tr:
1103 1102 # Adding a revision with bad node value fails.
1104 1103 with self.assertRaises(error.StorageError):
1105 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
1104 f.addrevision(
1105 b'foo', tr, 0, f.nullid, f.nullid, node=b'\x01' * 20
1106 )
1106 1107
1107 1108 def testaddrevisionunknownflag(self):
1108 1109 f = self._makefilefn()
@@ -1113,7 +1114,7 b' class ifilemutationtests(basetestcase):'
1113 1114 break
1114 1115
1115 1116 with self.assertRaises(error.StorageError):
1116 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
1117 f.addrevision(b'foo', tr, 0, f.nullid, f.nullid, flags=flags)
1117 1118
1118 1119 def testaddgroupsimple(self):
1119 1120 f = self._makefilefn()
@@ -1153,12 +1154,12 b' class ifilemutationtests(basetestcase):'
1153 1154 delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
1154 1155
1155 1156 with self._maketransactionfn() as tr:
1156 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
1157 node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
1157 1158
1158 1159 f = self._makefilefn()
1159 1160
1160 1161 deltas = [
1161 (node0, nullid, nullid, nullid, nullid, delta0, 0, {}),
1162 (node0, f.nullid, f.nullid, f.nullid, f.nullid, delta0, 0, {}),
1162 1163 ]
1163 1164
1164 1165 with self._maketransactionfn() as tr:
@@ -1207,7 +1208,7 b' class ifilemutationtests(basetestcase):'
1207 1208 nodes = []
1208 1209 with self._maketransactionfn() as tr:
1209 1210 for fulltext in fulltexts:
1210 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
1211 nodes.append(f.add(fulltext, None, tr, 0, f.nullid, f.nullid))
1211 1212
1212 1213 f = self._makefilefn()
1213 1214 deltas = []
@@ -1215,7 +1216,7 b' class ifilemutationtests(basetestcase):'
1215 1216 delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
1216 1217
1217 1218 deltas.append(
1218 (nodes[i], nullid, nullid, nullid, nullid, delta, 0, {})
1219 (nodes[i], f.nullid, f.nullid, f.nullid, f.nullid, delta, 0, {})
1219 1220 )
1220 1221
1221 1222 with self._maketransactionfn() as tr:
@@ -1254,18 +1255,18 b' class ifilemutationtests(basetestcase):'
1254 1255 )
1255 1256
1256 1257 with self._maketransactionfn() as tr:
1257 node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
1258 node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
1258 1259
1259 1260 # The node value doesn't matter since we can't verify it.
1260 1261 node1 = b'\xbb' * 20
1261 1262
1262 1263 self._addrawrevisionfn(
1263 f, tr, node1, node0, nullid, 1, stored1, censored=True
1264 f, tr, node1, node0, f.nullid, 1, stored1, censored=True
1264 1265 )
1265 1266
1266 1267 delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
1267 1268 deltas = [
1268 (b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0, {})
1269 (b'\xcc' * 20, node1, f.nullid, b'\x01' * 20, node1, delta, 0, {})
1269 1270 ]
1270 1271
1271 1272 with self._maketransactionfn() as tr:
@@ -1276,9 +1277,9 b' class ifilemutationtests(basetestcase):'
1276 1277 f = self._makefilefn()
1277 1278
1278 1279 with self._maketransactionfn() as tr:
1279 node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
1280 node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
1281 node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)
1280 node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
1281 node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, f.nullid)
1282 node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, f.nullid)
1282 1283
1283 1284 with self._maketransactionfn() as tr:
1284 1285 f.censorrevision(tr, node1)
@@ -1298,7 +1299,7 b' class ifilemutationtests(basetestcase):'
1298 1299
1299 1300 with self._maketransactionfn() as tr:
1300 1301 for rev in range(10):
1301 f.add(b'%d' % rev, None, tr, rev, nullid, nullid)
1302 f.add(b'%d' % rev, None, tr, rev, f.nullid, f.nullid)
1302 1303
1303 1304 for rev in range(10):
1304 1305 self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1308,10 +1309,10 b' class ifilemutationtests(basetestcase):'
1308 1309 f = self._makefilefn()
1309 1310
1310 1311 with self._maketransactionfn() as tr:
1311 p1 = nullid
1312 p1 = f.nullid
1312 1313
1313 1314 for rev in range(10):
1314 f.add(b'%d' % rev, None, tr, rev, p1, nullid)
1315 f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
1315 1316
1316 1317 for rev in range(10):
1317 1318 self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1320,11 +1321,11 b' class ifilemutationtests(basetestcase):'
1320 1321 f = self._makefilefn()
1321 1322
1322 1323 with self._maketransactionfn() as tr:
1323 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
1324 node1 = f.add(b'1', None, tr, 1, node0, nullid)
1325 f.add(b'2', None, tr, 2, node1, nullid)
1326 f.add(b'3', None, tr, 3, node0, nullid)
1327 f.add(b'4', None, tr, 4, node0, nullid)
1324 node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
1325 node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
1326 f.add(b'2', None, tr, 2, node1, f.nullid)
1327 f.add(b'3', None, tr, 3, node0, f.nullid)
1328 f.add(b'4', None, tr, 4, node0, f.nullid)
1328 1329
1329 1330 for rev in range(5):
1330 1331 self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1333,9 +1334,9 b' class ifilemutationtests(basetestcase):'
1333 1334 f = self._makefilefn()
1334 1335
1335 1336 with self._maketransactionfn() as tr:
1336 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
1337 f.add(b'1', None, tr, 10, node0, nullid)
1338 f.add(b'2', None, tr, 5, node0, nullid)
1337 node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
1338 f.add(b'1', None, tr, 10, node0, f.nullid)
1339 f.add(b'2', None, tr, 5, node0, f.nullid)
1339 1340
1340 1341 self.assertEqual(f.getstrippoint(0), (0, set()))
1341 1342 self.assertEqual(f.getstrippoint(1), (1, set()))
@@ -1362,9 +1363,9 b' class ifilemutationtests(basetestcase):'
1362 1363 f = self._makefilefn()
1363 1364
1364 1365 with self._maketransactionfn() as tr:
1365 p1 = nullid
1366 p1 = f.nullid
1366 1367 for rev in range(10):
1367 p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)
1368 p1 = f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
1368 1369
1369 1370 self.assertEqual(len(f), 10)
1370 1371
@@ -1377,9 +1378,9 b' class ifilemutationtests(basetestcase):'
1377 1378 f = self._makefilefn()
1378 1379
1379 1380 with self._maketransactionfn() as tr:
1380 f.add(b'0', None, tr, 0, nullid, nullid)
1381 node1 = f.add(b'1', None, tr, 5, nullid, nullid)
1382 node2 = f.add(b'2', None, tr, 10, nullid, nullid)
1381 f.add(b'0', None, tr, 0, f.nullid, f.nullid)
1382 node1 = f.add(b'1', None, tr, 5, f.nullid, f.nullid)
1383 node2 = f.add(b'2', None, tr, 10, f.nullid, f.nullid)
1383 1384
1384 1385 self.assertEqual(len(f), 3)
1385 1386
@@ -10,10 +10,7 b' from __future__ import absolute_import'
10 10 import collections
11 11
12 12 from .i18n import _
13 from .node import (
14 nullid,
15 short,
16 )
13 from .node import short
17 14 from . import (
18 15 error,
19 16 pycompat,
@@ -44,11 +41,11 b' def findcommonincoming(repo, remote, hea'
44 41 if audit is not None:
45 42 audit[b'total-roundtrips'] = 1
46 43
47 if repo.changelog.tip() == nullid:
48 base.add(nullid)
49 if heads != [nullid]:
50 return [nullid], [nullid], list(heads)
51 return [nullid], [], heads
44 if repo.changelog.tip() == repo.nullid:
45 base.add(repo.nullid)
46 if heads != [repo.nullid]:
47 return [repo.nullid], [repo.nullid], list(heads)
48 return [repo.nullid], [], heads
52 49
53 50 # assume we're closer to the tip than the root
54 51 # and start by examining the heads
@@ -84,7 +81,7 b' def findcommonincoming(repo, remote, hea'
84 81 continue
85 82
86 83 repo.ui.debug(b"examining %s:%s\n" % (short(n[0]), short(n[1])))
87 if n[0] == nullid: # found the end of the branch
84 if n[0] == repo.nullid: # found the end of the branch
88 85 pass
89 86 elif n in seenbranch:
90 87 repo.ui.debug(b"branch already found\n")
@@ -170,7 +167,7 b' def findcommonincoming(repo, remote, hea'
170 167 raise error.RepoError(_(b"already have changeset ") + short(f[:4]))
171 168
172 169 base = list(base)
173 if base == [nullid]:
170 if base == [repo.nullid]:
174 171 if force:
175 172 repo.ui.warn(_(b"warning: repository is unrelated\n"))
176 173 else:
@@ -34,6 +34,7 b' import time'
34 34 import traceback
35 35 import warnings
36 36
37 from .node import hex
37 38 from .thirdparty import attr
38 39 from .pycompat import (
39 40 delattr,
@@ -13,8 +13,8 b' import struct'
13 13 from ..i18n import _
14 14 from ..node import (
15 15 bin,
16 nullid,
17 16 nullrev,
17 sha1nodeconstants,
18 18 )
19 19 from .. import (
20 20 dagop,
@@ -26,7 +26,7 b' from ..interfaces import repository'
26 26 from ..revlogutils import sidedata as sidedatamod
27 27 from ..utils import hashutil
28 28
29 _nullhash = hashutil.sha1(nullid)
29 _nullhash = hashutil.sha1(sha1nodeconstants.nullid)
30 30
31 31
32 32 def hashrevisionsha1(text, p1, p2):
@@ -37,7 +37,7 b' def hashrevisionsha1(text, p1, p2):'
37 37 content in the revision graph.
38 38 """
39 39 # As of now, if one of the parent nodes is null, it is p2
40 if p2 == nullid:
40 if p2 == sha1nodeconstants.nullid:
41 41 # deep copy of a hash is faster than creating one
42 42 s = _nullhash.copy()
43 43 s.update(p1)
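The hunk is truncated by the diff context, but the underlying scheme is worth spelling out: a node is the SHA-1 of the two parent nodes in sorted order followed by the revision text, and the _nullhash copy above is just a fast path for the common p2 == null case. A self-contained sketch of the full function (standard scheme; fast path omitted):

    import hashlib

    def hashrevisionsha1(text, p1, p2):
        """SHA-1 node: sorted parent nodes, then the revision text."""
        a, b = sorted([p1, p2])
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()

    null = b"\0" * 20
    # Identical (text, parents) always produce the identical node,
    # which is also why varying only the linkrev cannot change a hash.
    assert hashrevisionsha1(b'x', null, null) == hashrevisionsha1(b'x', null, null)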
@@ -107,7 +107,7 b' def filerevisioncopied(store, node):'
107 107 Returns ``False`` if the file has no copy metadata. Otherwise a
108 108 2-tuple of the source filename and node.
109 109 """
110 if store.parents(node)[0] != nullid:
110 if store.parents(node)[0] != sha1nodeconstants.nullid:
111 111 return False
112 112
113 113 meta = parsemeta(store.revision(node))[0]
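Background for the parents[0] check: filelog copy metadata is only ever recorded on revisions whose first parent is null, so anything with a real p1 can be rejected without parsing. A sketch (hypothetical helper; the real function also decodes the metadata header):

    def revision_copied(p1, meta, nc):
        """Return (source, copyrev) if this revision records a copy."""
        if p1 != nc.nullid:
            return False              # a real p1 rules out copy metadata
        if meta and b'copy' in meta and b'copyrev' in meta:
            return meta[b'copy'], meta[b'copyrev']
        return False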
@@ -10,13 +10,8 b' from __future__ import absolute_import'
10 10 import os
11 11
12 12 from .i18n import _
13 from .node import (
14 nullid,
15 short,
16 )
17 from .utils import (
18 stringutil,
19 )
13 from .node import short
14 from .utils import stringutil
20 15
21 16 from . import (
22 17 error,
@@ -159,13 +154,13 b' class verifier(object):'
159 154
160 155 try:
161 156 p1, p2 = obj.parents(node)
162 if p1 not in seen and p1 != nullid:
157 if p1 not in seen and p1 != self.repo.nullid:
163 158 self._err(
164 159 lr,
165 160 _(b"unknown parent 1 %s of %s") % (short(p1), short(node)),
166 161 f,
167 162 )
168 if p2 not in seen and p2 != nullid:
163 if p2 not in seen and p2 != self.repo.nullid:
169 164 self._err(
170 165 lr,
171 166 _(b"unknown parent 2 %s of %s") % (short(p2), short(node)),
@@ -267,7 +262,7 b' class verifier(object):'
267 262
268 263 try:
269 264 changes = cl.read(n)
270 if changes[0] != nullid:
265 if changes[0] != self.repo.nullid:
271 266 mflinkrevs.setdefault(changes[0], []).append(i)
272 267 self.refersmf = True
273 268 for f in changes[3]:
@@ -598,7 +593,7 b' class verifier(object):'
598 593 % (rp[0], short(rp[1])),
599 594 f,
600 595 )
601 elif rp[1] == nullid:
596 elif rp[1] == self.repo.nullid:
602 597 ui.note(
603 598 _(
604 599 b"warning: %s@%s: copy source"
@@ -11,10 +11,7 b' import binascii'
11 11 import os
12 12
13 13 from .i18n import _
14 from .node import (
15 hex,
16 nullid,
17 )
14 from .node import hex
18 15 from .pycompat import getattr
19 16
20 17 from . import (
@@ -470,7 +467,7 b' def getbundle(repo, proto, others):'
470 467 clheads = set(repo.changelog.heads())
471 468 heads = set(opts.get(b'heads', set()))
472 469 common = set(opts.get(b'common', set()))
473 common.discard(nullid)
470 common.discard(repo.nullid)
474 471 if (
475 472 repo.ui.configbool(b'server', b'pullbundle')
476 473 and b'partial-pull' in proto.getprotocaps()
@@ -10,10 +10,7 b' import collections'
10 10 import contextlib
11 11
12 12 from .i18n import _
13 from .node import (
14 hex,
15 nullid,
16 )
13 from .node import hex
17 14 from . import (
18 15 discovery,
19 16 encoding,
@@ -950,7 +947,7 b' def resolvenodes(repo, revisions):'
950 947 if spec[b'roots']:
951 948 common = [n for n in spec[b'roots'] if clhasnode(n)]
952 949 else:
953 common = [nullid]
950 common = [repo.nullid]
954 951
955 952 for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
956 953 if n not in seen:
@@ -86,7 +86,6 b' import collections'
86 86 import itertools
87 87 import re
88 88
89 from mercurial.node import nullid
90 89 from mercurial.i18n import _
91 90 from mercurial import (
92 91 context,
@@ -299,7 +298,7 b' class simplecommitctx(context.committabl'
299 298 self._added = added
300 299 self._parents = parentctxs
301 300 while len(self._parents) < 2:
302 self._parents.append(repo[nullid])
301 self._parents.append(repo[repo.nullid])
303 302
304 303 def filectx(self, key):
305 304 return simplefilectx(key, self._added[key])
@@ -388,7 +387,7 b' def debugdrawdag(ui, repo, **opts):'
388 387 content = content.replace(br'\n', b'\n').replace(br'\1', b'\1')
389 388 files[name][path] = content
390 389
391 committed = {None: nullid} # {name: node}
390 committed = {None: repo.nullid} # {name: node}
392 391
393 392 # for leaf nodes, try to find existing nodes in repo
394 393 for name, parents in edges.items():
@@ -18,7 +18,6 b' from mercurial.i18n import _'
18 18 from mercurial.node import (
19 19 bin,
20 20 hex,
21 nullid,
22 21 nullrev,
23 22 )
24 23 from mercurial.thirdparty import attr
@@ -136,18 +135,18 b' class filestorage(object):'
136 135 self._indexbynode[entry[b'node']] = entry
137 136 self._indexbyrev[i] = entry
138 137
139 self._indexbynode[nullid] = {
140 b'node': nullid,
141 b'p1': nullid,
142 b'p2': nullid,
138 self._indexbynode[self._repo.nullid] = {
139 b'node': self._repo.nullid,
140 b'p1': self._repo.nullid,
141 b'p2': self._repo.nullid,
143 142 b'linkrev': nullrev,
144 143 b'flags': 0,
145 144 }
146 145
147 146 self._indexbyrev[nullrev] = {
148 b'node': nullid,
149 b'p1': nullid,
150 b'p2': nullid,
147 b'node': self._repo.nullid,
148 b'p1': self._repo.nullid,
149 b'p2': self._repo.nullid,
151 150 b'linkrev': nullrev,
152 151 b'flags': 0,
153 152 }
@@ -160,7 +159,7 b' class filestorage(object):'
160 159 (0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
161 160 )
162 161
163 self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
162 self._index.append((0, 0, 0, -1, -1, -1, -1, self._repo.nullid))
164 163
165 164 def __len__(self):
166 165 return len(self._indexdata)
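The test store mirrors revlog's convention of giving the null revision a synthetic record in both lookup tables, so queries by node (nullid) and by rev (nullrev) agree. A compact sketch of that invariant (hypothetical stand-ins):

    nullrev = -1
    nc_nullid = b"\0" * 20
    null_record = {
        b'node': nc_nullid, b'p1': nc_nullid, b'p2': nc_nullid,
        b'linkrev': nullrev, b'flags': 0,
    }
    indexbynode = {nc_nullid: null_record}
    indexbyrev = {nullrev: null_record}
    assert indexbynode[nc_nullid] is indexbyrev[nullrev]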
@@ -288,7 +287,7 b' class filestorage(object):'
288 287 node = nodeorrev
289 288 validatenode(node)
290 289
291 if node == nullid:
290 if node == self._repo.nullid:
292 291 return b''
293 292
294 293 rev = self.rev(node)
@@ -325,7 +324,7 b' class filestorage(object):'
325 324 def renamed(self, node):
326 325 validatenode(node)
327 326
328 if self.parents(node)[0] != nullid:
327 if self.parents(node)[0] != self._repo.nullid:
329 328 return False
330 329
331 330 fulltext = self.revision(node)
@@ -451,7 +450,7 b' class filestorage(object):'
451 450 sidedata_helpers=None,
452 451 ):
453 452 # TODO this will probably break on some ordering options.
454 nodes = [n for n in nodes if n != nullid]
453 nodes = [n for n in nodes if n != self._repo.nullid]
455 454 if not nodes:
456 455 return
457 456 for delta in storageutil.emitrevisions(
@@ -559,7 +558,7 b' class filestorage(object):'
559 558 continue
560 559
561 560 # Need to resolve the fulltext from the delta base.
562 if deltabase == nullid:
561 if deltabase == self._repo.nullid:
563 562 text = mdiff.patch(b'', delta)
564 563 else:
565 564 text = mdiff.patch(self.revision(deltabase), delta)
@@ -588,11 +587,11 b' class filestorage(object):'
588 587 # This is copied from revlog.py.
589 588 if start is None and stop is None:
590 589 if not len(self):
591 return [nullid]
590 return [self._repo.nullid]
592 591 return [self.node(r) for r in self._headrevs()]
593 592
594 593 if start is None:
595 start = nullid
594 start = self._repo.nullid
596 595 if stop is None:
597 596 stop = []
598 597 stoprevs = {self.rev(n) for n in stop}
@@ -479,19 +479,19 b' and its ancestor by overriding "repo._fi'
479 479
480 480 $ cat > ../legacyrepo.py <<EOF
481 481 > from __future__ import absolute_import
482 > from mercurial import commit, error, extensions, node
482 > from mercurial import commit, error, extensions
483 483 > def _filecommit(orig, repo, fctx, manifest1, manifest2,
484 484 > linkrev, tr, includecopymeta, ms):
485 485 > fname = fctx.path()
486 486 > text = fctx.data()
487 487 > flog = repo.file(fname)
488 > fparent1 = manifest1.get(fname, node.nullid)
489 > fparent2 = manifest2.get(fname, node.nullid)
488 > fparent1 = manifest1.get(fname, repo.nullid)
489 > fparent2 = manifest2.get(fname, repo.nullid)
490 490 > meta = {}
491 491 > copy = fctx.copysource()
492 492 > if copy and copy != fname:
493 493 > raise error.Abort('copying is not supported')
494 > if fparent2 != node.nullid:
494 > if fparent2 != repo.nullid:
495 495 > return flog.add(text, meta, tr, linkrev,
496 496 > fparent1, fparent2), 'modified'
497 497 > raise error.Abort('only merging is supported')
@@ -646,14 +646,14 b' Test making empty commits'
646 646 verify pathauditor blocks evil filepaths
647 647 $ cat > evil-commit.py <<EOF
648 648 > from __future__ import absolute_import
649 > from mercurial import context, hg, node, ui as uimod
649 > from mercurial import context, hg, ui as uimod
650 650 > notrc = u".h\u200cg".encode('utf-8') + b'/hgrc'
651 651 > u = uimod.ui.load()
652 652 > r = hg.repository(u, b'.')
653 653 > def filectxfn(repo, memctx, path):
654 654 > return context.memfilectx(repo, memctx, path,
655 655 > b'[hooks]\nupdate = echo owned')
656 > c = context.memctx(r, [r.changelog.tip(), node.nullid],
656 > c = context.memctx(r, [r.changelog.tip(), r.nullid],
657 657 > b'evil', [notrc], filectxfn, 0)
658 658 > r.commitctx(c)
659 659 > EOF
@@ -672,14 +672,14 b' verify pathauditor blocks evil filepaths'
672 672 repository tip rolled back to revision 2 (undo commit)
673 673 $ cat > evil-commit.py <<EOF
674 674 > from __future__ import absolute_import
675 > from mercurial import context, hg, node, ui as uimod
675 > from mercurial import context, hg, ui as uimod
676 676 > notrc = b"HG~1/hgrc"
677 677 > u = uimod.ui.load()
678 678 > r = hg.repository(u, b'.')
679 679 > def filectxfn(repo, memctx, path):
680 680 > return context.memfilectx(repo, memctx, path,
681 681 > b'[hooks]\nupdate = echo owned')
682 > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
682 > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
683 683 > b'evil', [notrc], filectxfn, 0)
684 684 > r.commitctx(c)
685 685 > EOF
@@ -692,14 +692,14 b' verify pathauditor blocks evil filepaths'
692 692 repository tip rolled back to revision 2 (undo commit)
693 693 $ cat > evil-commit.py <<EOF
694 694 > from __future__ import absolute_import
695 > from mercurial import context, hg, node, ui as uimod
695 > from mercurial import context, hg, ui as uimod
696 696 > notrc = b"HG8B6C~2/hgrc"
697 697 > u = uimod.ui.load()
698 698 > r = hg.repository(u, b'.')
699 699 > def filectxfn(repo, memctx, path):
700 700 > return context.memfilectx(repo, memctx, path,
701 701 > b'[hooks]\nupdate = echo owned')
702 > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
702 > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
703 703 > b'evil', [notrc], filectxfn, 0)
704 704 > r.commitctx(c)
705 705 > EOF
@@ -482,19 +482,19 b' and its ancestor by overriding "repo._fi'
482 482
483 483 $ cat > ../legacyrepo.py <<EOF
484 484 > from __future__ import absolute_import
485 > from mercurial import commit, error, extensions, node
485 > from mercurial import commit, error, extensions
486 486 > def _filecommit(orig, repo, fctx, manifest1, manifest2,
487 487 > linkrev, tr, includecopymeta, ms):
488 488 > fname = fctx.path()
489 489 > text = fctx.data()
490 490 > flog = repo.file(fname)
491 > fparent1 = manifest1.get(fname, node.nullid)
492 > fparent2 = manifest2.get(fname, node.nullid)
491 > fparent1 = manifest1.get(fname, repo.nullid)
492 > fparent2 = manifest2.get(fname, repo.nullid)
493 493 > meta = {}
494 494 > copy = fctx.copysource()
495 495 > if copy and copy != fname:
496 496 > raise error.Abort('copying is not supported')
497 > if fparent2 != node.nullid:
497 > if fparent2 != repo.nullid:
498 498 > return flog.add(text, meta, tr, linkrev,
499 499 > fparent1, fparent2), 'modified'
500 500 > raise error.Abort('only merging is supported')
@@ -4,10 +4,7 b' Tests the behavior of filelog w.r.t. dat'
4 4 """
5 5 from __future__ import absolute_import, print_function
6 6
7 from mercurial.node import (
8 hex,
9 nullid,
10 )
7 from mercurial.node import hex
11 8 from mercurial import (
12 9 hg,
13 10 ui as uimod,
@@ -22,7 +19,7 b" fl = repo.file(b'foobar')"
22 19 def addrev(text, renamed=False):
23 20 if renamed:
24 21 # data doesn't matter. Just make sure filelog.renamed() returns True
25 meta = {b'copyrev': hex(nullid), b'copy': b'bar'}
22 meta = {b'copyrev': hex(repo.nullid), b'copy': b'bar'}
26 23 else:
27 24 meta = {}
28 25
@@ -30,7 +27,7 b' def addrev(text, renamed=False):'
30 27 try:
31 28 lock = repo.lock()
32 29 t = repo.transaction(b'commit')
33 node = fl.add(text, meta, t, 0, nullid, nullid)
30 node = fl.add(text, meta, t, 0, repo.nullid, repo.nullid)
34 31 return node
35 32 finally:
36 33 if t:
@@ -14,8 +14,8 b' import unittest'
14 14 from mercurial.node import (
15 15 bin,
16 16 hex,
17 nullid,
18 17 nullrev,
18 sha1nodeconstants,
19 19 )
20 20 from mercurial import (
21 21 policy,
@@ -40,7 +40,7 b' def py_parseindex(data, inline):'
40 40 s = 64
41 41 cache = None
42 42 index = []
43 nodemap = {nullid: nullrev}
43 nodemap = {sha1nodeconstants.nullid: nullrev}
44 44 n = off = 0
45 45
46 46 l = len(data) - s
@@ -227,7 +227,7 b' class parseindex2tests(unittest.TestCase'
227 227
228 228 ix = parsers.parse_index2(data_inlined, True)[0]
229 229 for i, r in enumerate(ix):
230 if r[7] == nullid:
230 if r[7] == sha1nodeconstants.nullid:
231 231 i = -1
232 232 try:
233 233 self.assertEqual(
@@ -240,7 +240,7 b' class parseindex2tests(unittest.TestCase'
240 240 break
241 241
242 242 def testminusone(self):
243 want = (0, 0, 0, -1, -1, -1, -1, nullid)
243 want = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid)
244 244 index, junk = parsers.parse_index2(data_inlined, True)
245 245 got = index[-1]
246 246 self.assertEqual(want, got) # inline data
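The sentinel tuple asserted here is the null revision's index entry, which every parsed index ends with. A sketch decoding its fields (names follow the revlog index layout as I understand it: offset/flags, compressed and uncompressed sizes, delta base, linkrev, parent revs, node):

    from collections import namedtuple

    IndexEntry = namedtuple(
        'IndexEntry',
        'offset_flags comp_len uncomp_len base linkrev p1rev p2rev node',
    )

    null_entry = IndexEntry(0, 0, 0, -1, -1, -1, -1, b"\0" * 20)
    assert null_entry.p1rev == null_entry.p2rev == -1  # both parents nullrev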
@@ -16,7 +16,7 b' import silenttestrunner'
16 16
17 17 # Load the local remotefilelog, not the system one
18 18 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
19 from mercurial.node import nullid
19 from mercurial.node import sha1nodeconstants
20 20 from mercurial import policy
21 21
22 22 if not policy._packageprefs.get(policy.policy, (False, False))[1]:
@@ -63,7 +63,14 b' class datapacktestsbase(object):'
63 63
64 64 def createPack(self, revisions=None, packdir=None):
65 65 if revisions is None:
66 revisions = [(b"filename", self.getFakeHash(), nullid, b"content")]
66 revisions = [
67 (
68 b"filename",
69 self.getFakeHash(),
70 sha1nodeconstants.nullid,
71 b"content",
72 )
73 ]
67 74
68 75 if packdir is None:
69 76 packdir = self.makeTempDir()
@@ -86,7 +93,7 b' class datapacktestsbase(object):'
86 93 filename = b"foo"
87 94 node = self.getHash(content)
88 95
89 revisions = [(filename, node, nullid, content)]
96 revisions = [(filename, node, sha1nodeconstants.nullid, content)]
90 97 pack = self.createPack(revisions)
91 98 if self.paramsavailable:
92 99 self.assertEqual(
@@ -126,7 +133,7 b' class datapacktestsbase(object):'
126 133 """Test putting multiple delta blobs into a pack and read the chain."""
127 134 revisions = []
128 135 filename = b"foo"
129 lastnode = nullid
136 lastnode = sha1nodeconstants.nullid
130 137 for i in range(10):
131 138 content = b"abcdef%d" % i
132 139 node = self.getHash(content)
@@ -157,7 +164,7 b' class datapacktestsbase(object):'
157 164 for j in range(random.randint(1, 100)):
158 165 content = b"content-%d" % j
159 166 node = self.getHash(content)
160 lastnode = nullid
167 lastnode = sha1nodeconstants.nullid
161 168 if len(filerevs) > 0:
162 169 lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
163 170 filerevs.append(node)
@@ -185,7 +192,9 b' class datapacktestsbase(object):'
185 192 b'Z': b'random_string',
186 193 b'_': b'\0' * i,
187 194 }
188 revisions.append((filename, node, nullid, content, meta))
195 revisions.append(
196 (filename, node, sha1nodeconstants.nullid, content, meta)
197 )
189 198 pack = self.createPack(revisions)
190 199 for name, node, x, content, origmeta in revisions:
191 200 parsedmeta = pack.getmeta(name, node)
@@ -198,7 +207,7 b' class datapacktestsbase(object):'
198 207 """Test the getmissing() api."""
199 208 revisions = []
200 209 filename = b"foo"
201 lastnode = nullid
210 lastnode = sha1nodeconstants.nullid
202 211 for i in range(10):
203 212 content = b"abcdef%d" % i
204 213 node = self.getHash(content)
@@ -225,7 +234,7 b' class datapacktestsbase(object):'
225 234 pack = self.createPack()
226 235
227 236 try:
228 pack.add(b'filename', nullid, b'contents')
237 pack.add(b'filename', sha1nodeconstants.nullid, b'contents')
229 238 self.assertTrue(False, "datapack.add should throw")
230 239 except RuntimeError:
231 240 pass
@@ -264,7 +273,9 b' class datapacktestsbase(object):'
264 273 content = filename
265 274 node = self.getHash(content)
266 275 blobs[(filename, node)] = content
267 revisions.append((filename, node, nullid, content))
276 revisions.append(
277 (filename, node, sha1nodeconstants.nullid, content)
278 )
268 279
269 280 pack = self.createPack(revisions)
270 281 if self.paramsavailable:
@@ -288,7 +299,12 b' class datapacktestsbase(object):'
288 299
289 300 for i in range(numpacks):
290 301 chain = []
291 revision = (b'%d' % i, self.getFakeHash(), nullid, b"content")
302 revision = (
303 b'%d' % i,
304 self.getFakeHash(),
305 sha1nodeconstants.nullid,
306 b"content",
307 )
292 308
293 309 for _ in range(revisionsperpack):
294 310 chain.append(revision)
@@ -346,7 +362,9 b' class datapacktestsbase(object):'
346 362 filename = b"filename-%d" % i
347 363 content = b"content-%d" % i
348 364 node = self.getHash(content)
349 revisions.append((filename, node, nullid, content))
365 revisions.append(
366 (filename, node, sha1nodeconstants.nullid, content)
367 )
350 368
351 369 path = self.createPack(revisions).path
352 370
@@ -13,7 +13,7 b' import unittest'
13 13
14 14 import silenttestrunner
15 15
16 from mercurial.node import nullid
16 from mercurial.node import sha1nodeconstants
17 17 from mercurial import (
18 18 pycompat,
19 19 ui as uimod,
@@ -59,8 +59,8 b' class histpacktests(unittest.TestCase):'
59 59 (
60 60 b"filename",
61 61 self.getFakeHash(),
62 nullid,
63 nullid,
62 sha1nodeconstants.nullid,
63 sha1nodeconstants.nullid,
64 64 self.getFakeHash(),
65 65 None,
66 66 )
@@ -119,10 +119,19 b' class histpacktests(unittest.TestCase):'
119 119 """
120 120 revisions = []
121 121 filename = b"foo"
122 lastnode = nullid
122 lastnode = sha1nodeconstants.nullid
123 123 for i in range(10):
124 124 node = self.getFakeHash()
125 revisions.append((filename, node, lastnode, nullid, nullid, None))
125 revisions.append(
126 (
127 filename,
128 node,
129 lastnode,
130 sha1nodeconstants.nullid,
131 sha1nodeconstants.nullid,
132 None,
133 )
134 )
126 135 lastnode = node
127 136
128 137 # revisions must be added in topological order, newest first
@@ -148,17 +157,17 b' class histpacktests(unittest.TestCase):'
148 157 for i in range(100):
149 158 filename = b"filename-%d" % i
150 159 entries = []
151 p2 = nullid
152 linknode = nullid
160 p2 = sha1nodeconstants.nullid
161 linknode = sha1nodeconstants.nullid
153 162 for j in range(random.randint(1, 100)):
154 163 node = self.getFakeHash()
155 p1 = nullid
164 p1 = sha1nodeconstants.nullid
156 165 if len(entries) > 0:
157 166 p1 = entries[random.randint(0, len(entries) - 1)]
158 167 entries.append(node)
159 168 revisions.append((filename, node, p1, p2, linknode, None))
160 169 allentries[(filename, node)] = (p1, p2, linknode)
161 if p1 == nullid:
170 if p1 == sha1nodeconstants.nullid:
162 171 ancestorcounts[(filename, node)] = 1
163 172 else:
164 173 newcount = ancestorcounts[(filename, p1)] + 1
@@ -182,10 +191,19 b' class histpacktests(unittest.TestCase):'
182 191 def testGetNodeInfo(self):
183 192 revisions = []
184 193 filename = b"foo"
185 lastnode = nullid
194 lastnode = sha1nodeconstants.nullid
186 195 for i in range(10):
187 196 node = self.getFakeHash()
188 revisions.append((filename, node, lastnode, nullid, nullid, None))
197 revisions.append(
198 (
199 filename,
200 node,
201 lastnode,
202 sha1nodeconstants.nullid,
203 sha1nodeconstants.nullid,
204 None,
205 )
206 )
189 207 lastnode = node
190 208
191 209 pack = self.createPack(revisions)
@@ -233,7 +251,14 b' class histpacktests(unittest.TestCase):'
233 251 pack = self.createPack()
234 252
235 253 try:
236 pack.add(b'filename', nullid, nullid, nullid, nullid, None)
254 pack.add(
255 b'filename',
256 sha1nodeconstants.nullid,
257 sha1nodeconstants.nullid,
258 sha1nodeconstants.nullid,
259 sha1nodeconstants.nullid,
260 None,
261 )
237 262 self.assertTrue(False, "historypack.add should throw")
238 263 except RuntimeError:
239 264 pass
@@ -6,7 +6,6 b' import collections'
6 6 import hashlib
7 7 import sys
8 8
9 from mercurial.node import nullid
10 9 from mercurial import (
11 10 encoding,
12 11 revlog,
@@ -93,7 +92,7 b' def appendrev(rlog, text, tr, isext=Fals'
93 92 """
94 93 nextrev = len(rlog)
95 94 p1 = rlog.node(nextrev - 1)
96 p2 = nullid
95 p2 = rlog.nullid
97 96 if isext:
98 97 flags = revlog.REVIDX_EXTSTORED
99 98 else:
@@ -127,7 +126,7 b" def addgroupcopy(rlog, tr, destname=b'_d"
127 126 class dummychangegroup(object):
128 127 @staticmethod
129 128 def deltachunk(pnode):
130 pnode = pnode or nullid
129 pnode = pnode or rlog.nullid
131 130 parentrev = rlog.rev(pnode)
132 131 r = parentrev + 1
133 132 if r >= len(rlog):
@@ -142,7 +141,7 b" def addgroupcopy(rlog, tr, destname=b'_d"
142 141 return {
143 142 b'node': rlog.node(r),
144 143 b'p1': pnode,
145 b'p2': nullid,
144 b'p2': rlog.nullid,
146 145 b'cs': rlog.node(rlog.linkrev(r)),
147 146 b'flags': rlog.flags(r),
148 147 b'deltabase': rlog.node(deltaparent),
@@ -183,7 +182,7 b" def lowlevelcopy(rlog, tr, destname=b'_d"
183 182 dlog = newrevlog(destname, recreate=True)
184 183 for r in rlog:
185 184 p1 = rlog.node(r - 1)
186 p2 = nullid
185 p2 = rlog.nullid
187 186 if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
188 187 text = rlog.rawdata(r)
189 188 cachedelta = None
@@ -10,10 +10,7 b' from __future__ import absolute_import'
10 10 import hashlib
11 11 import struct
12 12
13 from mercurial.node import (
14 nullid,
15 nullrev,
16 )
13 from mercurial.node import nullrev
17 14 from mercurial import (
18 15 extensions,
19 16 requirements,
@@ -46,7 +43,7 b' def wrap_revisiondata(orig, self, nodeor'
46 43 return text, sd
47 44 if self.version & 0xFFFF != 2:
48 45 return text, sd
49 if nodeorrev != nullrev and nodeorrev != nullid:
46 if nodeorrev != nullrev and nodeorrev != self.nullid:
50 47 cat1 = sd.get(sidedata.SD_TEST1)
51 48 if cat1 is not None and len(text) != struct.unpack('>I', cat1)[0]:
52 49 raise RuntimeError('text size mismatch')
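
The hunks above all apply one migration pattern: stop importing the module-level `nullid`/`nullhex` globals from `mercurial.node` and instead read the null identifiers either from the `sha1nodeconstants` class or from the `nullid` attribute of the object at hand (`repo`, `rlog`, or `self`). Below is a minimal sketch of that pattern, not part of the changeset itself; it assumes a Mercurial new enough to ship `sha1nodeconstants` and the repo-level `nullid` attribute, and `parents_of` is a hypothetical helper mirroring the patched `_filecommit` lookup.

    # Sketch only: illustrates the nodeconstants migration, assuming a
    # post-migration Mercurial is importable.
    from mercurial.node import sha1nodeconstants

    # Constants now come from the nodeconstants class rather than the
    # former module-level nullid/nullhex globals:
    assert sha1nodeconstants.nullid == b"\0" * 20
    assert sha1nodeconstants.nullhex == b"0" * 40

    def parents_of(repo, manifest, fname):
        """Hypothetical helper: look up a file's parent the way the
        patched _filecommit does, falling back to the repository's own
        null node instead of a global one."""
        return manifest.get(fname, repo.nullid)

Going through the instance (`repo.nullid`, `rlog.nullid`, `self.nullid`) rather than a global keeps callers hash-agnostic, which is the point of the nodeconstants class: a repository using a different hash length can supply a matching null node without every call site changing again.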