/**
 * Decodes text scrambled by the "obscrd" obfuscation scheme: character
 * elements are emitted out of DOM order (and visually repositioned via CSS),
 * with each element's true position stored in its `data-o` attribute.
 * Reassembles the original string by sorting words and characters on `data-o`.
 *
 * @param {string|Element|Document} [htmlOrElement] - An HTML string to parse
 *   with DOMParser, a root element/document to search within, or nullish to
 *   fall back to the global `document`.
 * @returns {string|undefined} The decoded text, or `undefined` when no
 *   element with an `obscrd-*` class is found under the root.
 */
function decodeObscrd(htmlOrElement) {
  let root;
  if (typeof htmlOrElement === 'string') {
    root = new DOMParser().parseFromString(htmlOrElement, 'text/html').body;
  } else {
    // ?? rather than ||: fall back to `document` only when the argument is
    // actually absent (null/undefined), never for a valid element.
    root = htmlOrElement ?? document;
  }

  const container = root.querySelector('[class*="obscrd-"]');
  if (!container) { return; }

  // Direct children carrying data-o are word-level wrappers; their data-o
  // gives the word order.
  const words = [...container.children].filter((el) => el.hasAttribute('data-o'));
  words.sort((a, b) => +a.dataset.o - +b.dataset.o);

  return words.map((word) => {
    // Only leaf data-o nodes hold actual characters — skip intermediate
    // wrappers that contain further data-o descendants.
    const chars = [...word.querySelectorAll('[data-o]')]
      .filter((el) => el.querySelector('[data-o]') === null);
    chars.sort((a, b) => +a.dataset.o - +b.dataset.o);
    return chars.map((c) => c.textContent).join('');
  }).join('');
}
Yep, that works — the data-o attributes are readable in the DOM, so the scheme can be reversed with custom code. That's accounted for in the threat model: the goal is raising the cost from "curl + cheerio" to "write a custom decoder per site," and most scrapers will simply move on to easier targets.